[llvm-branch-commits] [llvm] [RISCV][CFI] add function epilogue cfi information (PR #110810)

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Oct 2 17:01:04 PDT 2024


https://github.com/dlav-sc updated https://github.com/llvm/llvm-project/pull/110810

From 4a7903cd37ec4ee28c5fa2e60b9a6959cb5b621a Mon Sep 17 00:00:00 2001
From: Daniil Avdeev <daniil.avdeev at syntacore.com>
Date: Tue, 1 Oct 2024 18:46:38 +0300
Subject: [PATCH 1/7] [RISCV][CFI] add function epilogue cfi information

This patch adds CFI instructions to the function epilogue, which allows
lldb to obtain a valid backtrace at the end of a function.
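
For illustration, a minimal RV64 epilogue as emitted with this change might
look as follows (a sketch only; registers, reload offsets, and the frame size
depend on the actual frame layout):

    ld ra, 8(sp)            # reload the return address
    .cfi_restore ra         # ra holds its entry value again
    addi sp, sp, 16         # deallocate the stack frame
    .cfi_def_cfa_offset 0   # the CFA is now sp + 0
    ret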
---
 llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 69 ++++++++++++++++++++
 llvm/lib/Target/RISCV/RISCVFrameLowering.h   |  2 +
 2 files changed, 71 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index fc1a8bf19585a0..872485f37c972b 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -761,11 +761,18 @@ void RISCVFrameLowering::deallocateStack(MachineFunction &MF,
                                          const DebugLoc &DL, uint64_t StackSize,
                                          int64_t CFAOffset) const {
   const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
 
   Register SPReg = getSPReg(STI);
 
   RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(StackSize),
                 MachineInstr::FrameDestroy, getStackAlign());
+
+  unsigned CFIIndex =
+      MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, CFAOffset));
+  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex)
+      .setMIFlag(MachineInstr::FrameDestroy);
 }
 
 void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
@@ -773,6 +780,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
   const RISCVRegisterInfo *RI = STI.getRegisterInfo();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
   Register FPReg = getFPReg(STI);
   Register SPReg = getSPReg(STI);
 
@@ -826,7 +834,16 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
     if (!RestoreFP) {
       adjustStackForRVV(MF, MBB, LastFrameDestroy, DL, RVVStackSize,
                         MachineInstr::FrameDestroy);
+
+      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
+          nullptr, RI->getDwarfRegNum(SPReg, true), RealStackSize));
+      BuildMI(MBB, LastFrameDestroy, DL,
+              TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex)
+          .setMIFlag(MachineInstr::FrameDestroy);
     }
+
+    emitCalleeSavedRVVEpilogCFI(MBB, LastFrameDestroy);
   }
 
   if (FirstSPAdjustAmount) {
@@ -841,6 +858,13 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
       RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg,
                     StackOffset::getFixed(SecondSPAdjustAmount),
                     MachineInstr::FrameDestroy, getStackAlign());
+
+      unsigned CFIIndex = MF.addFrameInst(
+          MCCFIInstruction::cfiDefCfaOffset(nullptr, FirstSPAdjustAmount));
+      BuildMI(MBB, LastFrameDestroy, DL,
+              TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex)
+          .setMIFlag(MachineInstr::FrameDestroy);
     }
   }
 
@@ -860,6 +884,12 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
     RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, FPReg,
                   StackOffset::getFixed(-FPOffset), MachineInstr::FrameDestroy,
                   getStackAlign());
+
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
+        nullptr, RI->getDwarfRegNum(SPReg, true), RealStackSize));
+    BuildMI(MBB, LastFrameDestroy, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex)
+        .setMIFlag(MachineInstr::FrameDestroy);
   }
 
   bool ApplyPop = RVFI->isPushable(MF) && MBBI != MBB.end() &&
@@ -878,6 +908,23 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
                       /*stack_adj of cm.pop instr*/ RealStackSize - StackSize);
 
     ++MBBI;
+    // Update the CFA offset. After CM_POP, SP is equal to the CFA, so the
+    // CFA offset is zero.
+    unsigned CFIIndex =
+        MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex)
+        .setMIFlag(MachineInstr::FrameDestroy);
+  }
+
+  // Emit CFI restore directives for the callee-saved registers.
+  for (const auto &Entry : CSI) {
+    Register Reg = Entry.getReg();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
+        nullptr, RI->getDwarfRegNum(Reg, true)));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex)
+        .setMIFlag(MachineInstr::FrameDestroy);
   }
 
   // Deallocate stack if we didn't already do it during cm.pop handling and
@@ -1616,6 +1663,28 @@ void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
   }
 }
 
+void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
+  MachineFunction *MF = MBB.getParent();
+  const MachineFrameInfo &MFI = MF->getFrameInfo();
+  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
+  DebugLoc DL = MBB.findDebugLoc(MI);
+
+  const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
+  for (auto &CS : RVVCSI) {
+    int FI = CS.getFrameIdx();
+    if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+      Register Reg = CS.getReg();
+      unsigned CFIIndex = MF->addFrameInst(MCCFIInstruction::createRestore(
+          nullptr, RI->getDwarfRegNum(Reg, true)));
+      BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex)
+          .setMIFlag(MachineInstr::FrameDestroy);
+    }
+  }
+}
+
 bool RISCVFrameLowering::restoreCalleeSavedRegisters(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
     MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
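
As the comment in emitEpilogue above notes, once cm.pop has restored the saved
registers and deallocated the stack, SP is equal to the CFA. A hypothetical
Zcmp epilogue under this patch (the register list and stack adjustment are
illustrative, not taken from a specific test):

    cm.pop {ra, s0}, 16     # restore ra/s0 and deallocate the frame
    .cfi_def_cfa_offset 0   # SP now equals the CFA
    .cfi_restore ra         # the callee-saved registers hold their
    .cfi_restore s0         # entry values again
    ret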
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
index d660f3ad67c968..782d2172e2093c 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -91,6 +91,8 @@ class RISCVFrameLowering : public TargetFrameLowering {
   void emitCalleeSavedRVVPrologCFI(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    bool HasFP) const;
+  void emitCalleeSavedRVVEpilogCFI(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MI) const;
   void deallocateStack(MachineFunction &MF, MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                        uint64_t StackSize, int64_t CFAOffset) const;
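
For the frame-pointer case handled in emitEpilogue, restoring SP from FP
re-anchors the CFA on SP before the callee-saved reloads. A sketch of the
resulting output (offsets are illustrative):

    addi sp, s0, -16        # restore SP from the frame pointer
    .cfi_def_cfa sp, 16     # CFA is sp + RealStackSize again
    lw ra, 12(sp)           # reload callee-saved registers
    .cfi_restore ra
    addi sp, sp, 16
    .cfi_def_cfa_offset 0
    ret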

From 849d63e3b33059a4a0d452c83db3ad952ff26254 Mon Sep 17 00:00:00 2001
From: Daniil Avdeev <daniil.avdeev at syntacore.com>
Date: Tue, 1 Oct 2024 22:20:49 +0300
Subject: [PATCH 2/7] [RISCV] update tests

---
 .../GlobalISel/stacksave-stackrestore.ll      |  10 +
 llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll  |  36 ++
 llvm/test/CodeGen/RISCV/addrspacecast.ll      |   4 +
 .../CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll |  85 ++++
 .../CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll |  85 ++++
 llvm/test/CodeGen/RISCV/branch-relaxation.ll  | 144 ++++++
 llvm/test/CodeGen/RISCV/callee-saved-gprs.ll  |  96 +++-
 .../test/CodeGen/RISCV/calling-conv-ilp32e.ll | 271 +++++++++++
 llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll       |  12 +-
 llvm/test/CodeGen/RISCV/double-intrinsics.ll  |   8 +
 llvm/test/CodeGen/RISCV/double-round-conv.ll  |  50 ++
 .../early-clobber-tied-def-subreg-liveness.ll |   2 +
 llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll       |   4 +
 .../RISCV/exception-pointer-register.ll       |   8 +
 llvm/test/CodeGen/RISCV/float-intrinsics.ll   |   8 +
 llvm/test/CodeGen/RISCV/float-round-conv.ll   |  40 ++
 llvm/test/CodeGen/RISCV/fpclamptosat.ll       | 176 +++++++
 llvm/test/CodeGen/RISCV/frame-info.ll         |  32 ++
 .../test/CodeGen/RISCV/half-convert-strict.ll |  10 +
 llvm/test/CodeGen/RISCV/half-intrinsics.ll    |  20 +
 llvm/test/CodeGen/RISCV/half-round-conv.ll    |  80 ++++
 .../CodeGen/RISCV/hwasan-check-memaccess.ll   |   2 +
 .../RISCV/intrinsic-cttz-elts-vscale.ll       |   3 +
 llvm/test/CodeGen/RISCV/kcfi-mir.ll           |   2 +
 llvm/test/CodeGen/RISCV/large-stack.ll        |  15 +
 llvm/test/CodeGen/RISCV/live-sp.mir           |   2 +
 llvm/test/CodeGen/RISCV/llvm.exp10.ll         | 120 +++++
 .../RISCV/local-stack-slot-allocation.ll      |  14 +
 llvm/test/CodeGen/RISCV/lpad.ll               |   8 +
 llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll |   5 +
 llvm/test/CodeGen/RISCV/nontemporal.ll        |  60 +++
 .../test/CodeGen/RISCV/overflow-intrinsics.ll |  23 +
 llvm/test/CodeGen/RISCV/pr58025.ll            |   1 +
 llvm/test/CodeGen/RISCV/pr58286.ll            |   4 +
 llvm/test/CodeGen/RISCV/pr63365.ll            |   1 +
 llvm/test/CodeGen/RISCV/pr69586.ll            |  29 ++
 llvm/test/CodeGen/RISCV/pr88365.ll            |   3 +
 llvm/test/CodeGen/RISCV/prolog-epilogue.ll    |  80 ++++
 llvm/test/CodeGen/RISCV/push-pop-opt-crash.ll |   4 +
 llvm/test/CodeGen/RISCV/push-pop-popret.ll    | 388 ++++++++++++++--
 ...regalloc-last-chance-recoloring-failure.ll |   8 +
 llvm/test/CodeGen/RISCV/rv64-patchpoint.ll    |   3 +
 .../RISCV/rv64-statepoint-call-lowering.ll    |  21 +
 llvm/test/CodeGen/RISCV/rvv-cfi-info.ll       |  18 +
 llvm/test/CodeGen/RISCV/rvv/abs-vp.ll         |   2 +
 .../RISCV/rvv/access-fixed-objects-by-rvv.ll  |   3 +
 .../RISCV/rvv/addi-scalable-offset.mir        |   4 +
 .../rvv/alloca-load-store-scalable-array.ll   |   2 +
 .../rvv/alloca-load-store-scalable-struct.ll  |   2 +
 .../rvv/alloca-load-store-vector-tuple.ll     |   6 +
 llvm/test/CodeGen/RISCV/rvv/binop-splats.ll   |   5 +
 .../CodeGen/RISCV/rvv/bitreverse-sdnode.ll    |   5 +
 llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll  |  20 +
 llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll   |   5 +
 llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll       |  21 +
 .../CodeGen/RISCV/rvv/callee-saved-regs.ll    |   4 +
 .../CodeGen/RISCV/rvv/calling-conv-fastcc.ll  |  40 ++
 llvm/test/CodeGen/RISCV/rvv/calling-conv.ll   |  16 +
 llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll        |  10 +
 llvm/test/CodeGen/RISCV/rvv/compressstore.ll  |   2 +
 llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll       |   6 +
 llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll        |   8 +
 .../test/CodeGen/RISCV/rvv/emergency-slot.mir |  14 +
 llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll  |  16 +
 .../CodeGen/RISCV/rvv/extractelt-int-rv32.ll  |   8 +
 .../CodeGen/RISCV/rvv/extractelt-int-rv64.ll  |   8 +
 .../rvv/fixed-vector-i8-index-cornercase.ll   |   8 +
 .../RISCV/rvv/fixed-vectors-binop-splats.ll   |   4 +
 .../RISCV/rvv/fixed-vectors-bitcast.ll        |   9 +
 .../RISCV/rvv/fixed-vectors-bitreverse-vp.ll  |  20 +
 .../RISCV/rvv/fixed-vectors-bitreverse.ll     |   2 +
 .../RISCV/rvv/fixed-vectors-bswap-vp.ll       |  20 +
 .../CodeGen/RISCV/rvv/fixed-vectors-bswap.ll  |   2 +
 .../rvv/fixed-vectors-calling-conv-fastcc.ll  |  12 +
 .../RISCV/rvv/fixed-vectors-calling-conv.ll   |  14 +
 .../RISCV/rvv/fixed-vectors-ceil-vp.ll        |   2 +
 .../RISCV/rvv/fixed-vectors-ctlz-vp.ll        |  24 +
 .../RISCV/rvv/fixed-vectors-ctpop-vp.ll       |  12 +
 .../RISCV/rvv/fixed-vectors-cttz-vp.ll        |  24 +
 .../RISCV/rvv/fixed-vectors-expandload-int.ll |   1 +
 .../RISCV/rvv/fixed-vectors-floor-vp.ll       |   2 +
 .../RISCV/rvv/fixed-vectors-fmaximum-vp.ll    |   6 +
 .../RISCV/rvv/fixed-vectors-fminimum-vp.ll    |   6 +
 .../rvv/fixed-vectors-fp-buildvec-bf16.ll     |   6 +
 .../RISCV/rvv/fixed-vectors-fp-buildvec.ll    | 100 ++++
 .../RISCV/rvv/fixed-vectors-fp-interleave.ll  |   2 +
 .../RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll   |   4 +
 .../rvv/fixed-vectors-insert-subvector.ll     |  16 +
 .../CodeGen/RISCV/rvv/fixed-vectors-insert.ll |   8 +
 .../RISCV/rvv/fixed-vectors-int-buildvec.ll   |  14 +
 .../rvv/fixed-vectors-int-explodevector.ll    |  29 ++
 .../RISCV/rvv/fixed-vectors-int-interleave.ll |   2 +
 .../RISCV/rvv/fixed-vectors-int-splat.ll      |   3 +
 .../CodeGen/RISCV/rvv/fixed-vectors-int.ll    |   2 +
 .../rvv/fixed-vectors-interleaved-access.ll   |   4 +
 .../CodeGen/RISCV/rvv/fixed-vectors-llrint.ll |  43 ++
 .../CodeGen/RISCV/rvv/fixed-vectors-lrint.ll  |  28 ++
 .../RISCV/rvv/fixed-vectors-masked-gather.ll  |  65 +++
 .../RISCV/rvv/fixed-vectors-masked-scatter.ll |  59 +++
 .../RISCV/rvv/fixed-vectors-reduction-fp.ll   |  16 +
 .../rvv/fixed-vectors-reduction-int-vp.ll     |  44 ++
 .../RISCV/rvv/fixed-vectors-reduction-int.ll  |   8 +
 .../RISCV/rvv/fixed-vectors-rint-vp.ll        |   2 +
 .../RISCV/rvv/fixed-vectors-round-vp.ll       |   2 +
 .../RISCV/rvv/fixed-vectors-roundeven-vp.ll   |   2 +
 .../RISCV/rvv/fixed-vectors-roundtozero-vp.ll |   2 +
 .../RISCV/rvv/fixed-vectors-setcc-fp-vp.ll    |  12 +
 .../RISCV/rvv/fixed-vectors-setcc-int-vp.ll   |  22 +
 .../rvv/fixed-vectors-shuffle-vslide1down.ll  |   2 +
 .../rvv/fixed-vectors-shuffle-vslide1up.ll    |   2 +
 .../RISCV/rvv/fixed-vectors-trunc-vp.ll       |   2 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll |   2 +
 .../RISCV/rvv/fixed-vectors-vadd-vp.ll        |   8 +
 .../RISCV/rvv/fixed-vectors-vand-vp.ll        |  10 +
 .../RISCV/rvv/fixed-vectors-vcopysign-vp.ll   |   2 +
 .../RISCV/rvv/fixed-vectors-vdiv-vp.ll        |   8 +
 .../RISCV/rvv/fixed-vectors-vdivu-vp.ll       |   8 +
 .../RISCV/rvv/fixed-vectors-vfma-vp.ll        |   4 +
 .../RISCV/rvv/fixed-vectors-vfmax-vp.ll       |   2 +
 .../RISCV/rvv/fixed-vectors-vfmin-vp.ll       |   2 +
 .../RISCV/rvv/fixed-vectors-vfmuladd-vp.ll    |   4 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll |   4 +
 .../RISCV/rvv/fixed-vectors-vfwmaccbf16.ll    |  20 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll |   4 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll |   4 +
 ...fixed-vectors-vitofp-constrained-sdnode.ll |   3 +
 .../RISCV/rvv/fixed-vectors-vmacc-vp.ll       |   9 +
 .../RISCV/rvv/fixed-vectors-vmax-vp.ll        |   8 +
 .../RISCV/rvv/fixed-vectors-vmaxu-vp.ll       |   8 +
 .../RISCV/rvv/fixed-vectors-vmin-vp.ll        |   8 +
 .../RISCV/rvv/fixed-vectors-vminu-vp.ll       |   8 +
 .../RISCV/rvv/fixed-vectors-vmul-vp.ll        |   8 +
 .../RISCV/rvv/fixed-vectors-vnmsac-vp.ll      |   9 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll |   8 +
 .../RISCV/rvv/fixed-vectors-vp-splat.ll       |   4 +
 .../RISCV/rvv/fixed-vectors-vpmerge.ll        |  10 +
 .../RISCV/rvv/fixed-vectors-vpscatter.ll      |   8 +
 .../RISCV/rvv/fixed-vectors-vrem-vp.ll        |   8 +
 .../RISCV/rvv/fixed-vectors-vremu-vp.ll       |   8 +
 .../RISCV/rvv/fixed-vectors-vrsub-vp.ll       |   8 +
 .../RISCV/rvv/fixed-vectors-vsadd-vp.ll       |   8 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll  |   4 +
 .../RISCV/rvv/fixed-vectors-vsaddu-vp.ll      |   8 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll |   4 +
 .../RISCV/rvv/fixed-vectors-vscale-range.ll   |   2 +
 .../RISCV/rvv/fixed-vectors-vselect-vp.ll     |  10 +
 .../RISCV/rvv/fixed-vectors-vssub-vp.ll       |   8 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vssub.ll  |   4 +
 .../RISCV/rvv/fixed-vectors-vssubu-vp.ll      |   8 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll |   4 +
 .../RISCV/rvv/fixed-vectors-vsub-vp.ll        |   8 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll  |   7 +
 .../RISCV/rvv/fixed-vectors-vwmulsu.ll        |   9 +
 .../CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll |   6 +
 .../RISCV/rvv/fixed-vectors-vxor-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/floor-vp.ll       |  10 +
 .../test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll |   4 +
 llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll    |  24 +
 .../test/CodeGen/RISCV/rvv/fminimum-sdnode.ll |   4 +
 llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll    |  24 +
 .../CodeGen/RISCV/rvv/fnearbyint-sdnode.ll    |   4 +
 .../CodeGen/RISCV/rvv/fpclamptosat_vec.ll     | 432 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll   |  24 +
 .../CodeGen/RISCV/rvv/get-vlen-debugloc.mir   |   1 +
 .../CodeGen/RISCV/rvv/known-never-zero.ll     |   2 +
 .../RISCV/rvv/large-rvv-stack-size.mir        |   4 +
 llvm/test/CodeGen/RISCV/rvv/localvar.ll       |  27 ++
 llvm/test/CodeGen/RISCV/rvv/memory-args.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll |   2 +
 .../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll |   2 +
 .../RISCV/rvv/named-vector-shuffle-reverse.ll |   8 +
 llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll   |   8 +
 .../CodeGen/RISCV/rvv/no-reserved-frame.ll    |   5 +
 llvm/test/CodeGen/RISCV/rvv/pr104480.ll       |   2 +
 llvm/test/CodeGen/RISCV/rvv/pr88576.ll        |   4 +
 llvm/test/CodeGen/RISCV/rvv/pr93587.ll        |   1 +
 llvm/test/CodeGen/RISCV/rvv/pr95865.ll        |  14 +
 .../CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll |   6 +
 llvm/test/CodeGen/RISCV/rvv/remat.ll          |  18 +
 llvm/test/CodeGen/RISCV/rvv/rint-vp.ll        |  10 +
 llvm/test/CodeGen/RISCV/rvv/round-vp.ll       |  10 +
 llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll   |  10 +
 llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll |  10 +
 .../test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll |   5 +
 llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll    |   6 +
 llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll   |  40 ++
 llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll  |  20 +
 .../rvv/splat-vector-split-i64-vl-sdnode.ll   |   1 +
 llvm/test/CodeGen/RISCV/rvv/stack-folding.ll  |  24 +
 llvm/test/CodeGen/RISCV/rvv/stepvector.ll     |   6 +
 .../test/CodeGen/RISCV/rvv/strided-vpstore.ll |   2 +
 .../RISCV/rvv/undef-earlyclobber-chain.ll     |   1 +
 llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll  |   2 +
 llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vand-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll   |  16 +
 llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll    |   8 +
 llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll   |   8 +
 llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll       |   8 +
 .../RISCV/rvv/vector-deinterleave-load.ll     |   2 +
 .../CodeGen/RISCV/rvv/vector-deinterleave.ll  |   6 +
 .../RISCV/rvv/vector-interleave-store.ll      |   2 +
 .../CodeGen/RISCV/rvv/vector-interleave.ll    |   8 +
 llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll       |  16 +
 llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll       |  16 +
 llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll        | 124 +++++
 .../RISCV/rvv/vfmadd-constrained-sdnode.ll    |  16 +
 llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll  |  16 +
 llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll       |   8 +
 .../RISCV/rvv/vfmsub-constrained-sdnode.ll    |   8 +
 llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll    |   4 +
 .../RISCV/rvv/vfnmadd-constrained-sdnode.ll   |   4 +
 .../RISCV/rvv/vfnmsub-constrained-sdnode.ll   |   4 +
 llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll     |   2 +
 llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll     |   2 +
 llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll       |  16 +
 llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll     |   2 +
 llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll    |   2 +
 llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll    |   2 +
 llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll       |  12 +
 llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll   |   4 +
 llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll       |  12 +
 llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll   |   4 +
 llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll   |   4 +
 llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll      |  12 +
 llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll  |   4 +
 llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll     |   4 +
 llvm/test/CodeGen/RISCV/rvv/vor-vp.ll         |   8 +
 llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll |   4 +
 llvm/test/CodeGen/RISCV/rvv/vp-splat.ll       |   4 +
 llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll |   6 +
 .../CodeGen/RISCV/rvv/vpscatter-sdnode.ll     |   6 +
 llvm/test/CodeGen/RISCV/rvv/vpstore.ll        |   2 +
 .../CodeGen/RISCV/rvv/vreductions-int-vp.ll   |  30 ++
 llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll    |   8 +
 llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll   |   8 +
 llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll   |   4 +
 llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll   |   4 +
 llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll  |   4 +
 llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll      |   8 +
 llvm/test/CodeGen/RISCV/rvv/vselect-int.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll     |   6 +
 llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll     |   2 +
 llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll    |   6 +
 llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll   |   4 +
 llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll       |   8 +
 llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll  |   4 +
 llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll      |   8 +
 llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll      |   2 +
 llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll     |   2 +
 llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll   |   2 +
 llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll    |   4 +
 llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll        |   8 +
 llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir |   2 +
 llvm/test/CodeGen/RISCV/shadowcallstack.ll    |  28 ++
 llvm/test/CodeGen/RISCV/shl-cttz.ll           |  16 +
 .../CodeGen/RISCV/shrinkwrap-jump-table.ll    |   2 +
 .../CodeGen/RISCV/stack-inst-compress.mir     |  36 ++
 llvm/test/CodeGen/RISCV/stack-offset.ll       |  48 ++
 ...realignment-with-variable-sized-objects.ll |  20 +
 llvm/test/CodeGen/RISCV/stack-realignment.ll  | 212 +++++++++
 llvm/test/CodeGen/RISCV/vararg-ilp32e.ll      |  12 +
 llvm/test/CodeGen/RISCV/vararg.ll             |  38 ++
 llvm/test/CodeGen/RISCV/vlenb.ll              |   3 +
 llvm/test/CodeGen/RISCV/xaluo.ll              |  27 ++
 llvm/test/CodeGen/RISCV/xcvbi.ll              |   6 +
 llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll    |  10 +
 .../CodeGen/RISCV/zcmp-additional-stack.ll    |   8 +-
 llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir   |  46 +-
 llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir  |  82 ++++
 .../RISCV/zcmp-prolog-epilog-crash.mir        |   1 +
 llvm/test/CodeGen/RISCV/zcmp-with-float.ll    |  20 +-
 llvm/test/CodeGen/RISCV/zdinx-large-spill.mir |   1 +
 .../test/DebugInfo/RISCV/relax-debug-frame.ll |  22 +-
 295 files changed, 5185 insertions(+), 70 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/stacksave-stackrestore.ll b/llvm/test/CodeGen/RISCV/GlobalISel/stacksave-stackrestore.ll
index 014283cf38b26b..caa749729ce198 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/stacksave-stackrestore.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/stacksave-stackrestore.ll
@@ -25,10 +25,15 @@ define void @test_scoped_alloca(i64 %n) {
 ; RV32-NEXT:    call use_addr
 ; RV32-NEXT:    mv sp, s1
 ; RV32-NEXT:    addi sp, s0, -16
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_scoped_alloca:
@@ -51,10 +56,15 @@ define void @test_scoped_alloca(i64 %n) {
 ; RV64-NEXT:    call use_addr
 ; RV64-NEXT:    mv sp, s1
 ; RV64-NEXT:    addi sp, s0, -32
+; RV64-NEXT:    .cfi_def_cfa sp, 32
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s1
 ; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %sp = call ptr @llvm.stacksave.p0()
   %addr = alloca i8, i64 %n
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index 01cab0d0e157bd..5ea87499d2c855 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -59,6 +59,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lw a0, 0(a0)
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: va1:
@@ -84,6 +85,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV64-NEXT:    sw a2, 12(sp)
 ; RV64-NEXT:    lw a0, 0(a0)
 ; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-WITHFP-LABEL: va1:
@@ -111,7 +113,10 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV32-WITHFP-NEXT:    lw a0, 0(a0)
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    .cfi_restore ra
+; RV32-WITHFP-NEXT:    .cfi_restore s0
 ; RV32-WITHFP-NEXT:    addi sp, sp, 48
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT:    ret
 ;
 ; RV64-WITHFP-LABEL: va1:
@@ -144,7 +149,10 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV64-WITHFP-NEXT:    lw a0, 0(a0)
 ; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    .cfi_restore ra
+; RV64-WITHFP-NEXT:    .cfi_restore s0
 ; RV64-WITHFP-NEXT:    addi sp, sp, 96
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
   %va = alloca ptr
   call void @llvm.va_start(ptr %va)
@@ -1588,6 +1596,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; RV32-NEXT:    lui a1, 24414
 ; RV32-NEXT:    addi a1, a1, 304
 ; RV32-NEXT:    add sp, sp, a1
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: va_large_stack:
@@ -1633,6 +1642,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; RV64-NEXT:    lui a1, 24414
 ; RV64-NEXT:    addiw a1, a1, 336
 ; RV64-NEXT:    add sp, sp, a1
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-WITHFP-LABEL: va_large_stack:
@@ -1667,9 +1677,13 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; RV32-WITHFP-NEXT:    lui a1, 24414
 ; RV32-WITHFP-NEXT:    addi a1, a1, -1728
 ; RV32-WITHFP-NEXT:    add sp, sp, a1
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-WITHFP-NEXT:    lw ra, 1996(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 1992(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    .cfi_restore ra
+; RV32-WITHFP-NEXT:    .cfi_restore s0
 ; RV32-WITHFP-NEXT:    addi sp, sp, 2032
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT:    ret
 ;
 ; RV64-WITHFP-LABEL: va_large_stack:
@@ -1709,9 +1723,13 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; RV64-WITHFP-NEXT:    lui a1, 24414
 ; RV64-WITHFP-NEXT:    addiw a1, a1, -1680
 ; RV64-WITHFP-NEXT:    add sp, sp, a1
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-WITHFP-NEXT:    ld ra, 1960(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 1952(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    .cfi_restore ra
+; RV64-WITHFP-NEXT:    .cfi_restore s0
 ; RV64-WITHFP-NEXT:    addi sp, sp, 2032
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
   %large = alloca [ 100000000 x i8 ]
   %va = alloca ptr
@@ -1739,6 +1757,7 @@ define iXLen @va_vprintf(ptr %fmt, ptr %arg_start) {
 ; RV32-NEXT:    sw a1, 8(sp)
 ; RV32-NEXT:    lw a0, 0(a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: va_vprintf:
@@ -1755,6 +1774,7 @@ define iXLen @va_vprintf(ptr %fmt, ptr %arg_start) {
 ; RV64-NEXT:    sd a1, 0(sp)
 ; RV64-NEXT:    ld a0, 0(a0)
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-WITHFP-LABEL: va_vprintf:
@@ -1778,7 +1798,10 @@ define iXLen @va_vprintf(ptr %fmt, ptr %arg_start) {
 ; RV32-WITHFP-NEXT:    lw a0, 0(a0)
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    .cfi_restore ra
+; RV32-WITHFP-NEXT:    .cfi_restore s0
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT:    ret
 ;
 ; RV64-WITHFP-LABEL: va_vprintf:
@@ -1802,7 +1825,10 @@ define iXLen @va_vprintf(ptr %fmt, ptr %arg_start) {
 ; RV64-WITHFP-NEXT:    ld a0, 0(a0)
 ; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    .cfi_restore ra
+; RV64-WITHFP-NEXT:    .cfi_restore s0
 ; RV64-WITHFP-NEXT:    addi sp, sp, 32
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
   %args = alloca ptr
   %args_cp = alloca ptr
@@ -1832,7 +1858,9 @@ define i32 @va_printf(ptr %fmt, ...) {
 ; RV32-NEXT:    sw a7, 44(sp)
 ; RV32-NEXT:    call va_vprintf
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: va_printf:
@@ -1853,7 +1881,9 @@ define i32 @va_printf(ptr %fmt, ...) {
 ; RV64-NEXT:    sd a7, 72(sp)
 ; RV64-NEXT:    call va_vprintf
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-WITHFP-LABEL: va_printf:
@@ -1879,7 +1909,10 @@ define i32 @va_printf(ptr %fmt, ...) {
 ; RV32-WITHFP-NEXT:    call va_vprintf
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    .cfi_restore ra
+; RV32-WITHFP-NEXT:    .cfi_restore s0
 ; RV32-WITHFP-NEXT:    addi sp, sp, 48
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT:    ret
 ;
 ; RV64-WITHFP-LABEL: va_printf:
@@ -1905,7 +1938,10 @@ define i32 @va_printf(ptr %fmt, ...) {
 ; RV64-WITHFP-NEXT:    call va_vprintf
 ; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    .cfi_restore ra
+; RV64-WITHFP-NEXT:    .cfi_restore s0
 ; RV64-WITHFP-NEXT:    addi sp, sp, 96
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
   %args = alloca ptr
   call void @llvm.va_start(ptr %args)
diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll
index e55a57a5167822..80a0efb043ebdf 100644
--- a/llvm/test/CodeGen/RISCV/addrspacecast.ll
+++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll
@@ -28,7 +28,9 @@ define void @cast1(ptr %ptr) {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    call foo
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cast1:
@@ -39,7 +41,9 @@ define void @cast1(ptr %ptr) {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    call foo
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %castptr = addrspacecast ptr %ptr to ptr addrspace(10)
   call void @foo(ptr addrspace(10) %castptr)
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
index 0d6ae3a51e2469..f22115130117a8 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
@@ -51,7 +51,12 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_cond_i8:
@@ -129,7 +134,12 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_cond_i8:
@@ -216,7 +226,13 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
+; RV32I-NEXT:    .cfi_restore s3
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_cond_i16:
@@ -300,7 +316,13 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
+; RV64I-NEXT:    .cfi_restore s3
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_cond_i16:
@@ -378,7 +400,11 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_cond_i32:
@@ -442,7 +468,12 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_cond_i32:
@@ -529,7 +560,12 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_cond_i64:
@@ -586,7 +622,12 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
 ; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    .cfi_restore ra
+; RV32IA-NEXT:    .cfi_restore s0
+; RV32IA-NEXT:    .cfi_restore s1
+; RV32IA-NEXT:    .cfi_restore s2
 ; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_usub_cond_i64:
@@ -621,7 +662,11 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_cond_i64:
@@ -686,7 +731,11 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_sat_i8:
@@ -759,7 +808,11 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_sat_i8:
@@ -841,7 +894,12 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_sat_i16:
@@ -920,7 +978,12 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_sat_i16:
@@ -997,7 +1060,11 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_sat_i32:
@@ -1057,7 +1124,11 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_sat_i32:
@@ -1142,7 +1213,12 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_usub_sat_i64:
@@ -1198,7 +1274,12 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
 ; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    .cfi_restore ra
+; RV32IA-NEXT:    .cfi_restore s0
+; RV32IA-NEXT:    .cfi_restore s1
+; RV32IA-NEXT:    .cfi_restore s2
 ; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_usub_sat_i64:
@@ -1233,7 +1314,11 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_usub_sat_i64:
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
index 927e778c9dd9c2..14a1e6b9827174 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
@@ -47,7 +47,11 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_uinc_wrap_i8:
@@ -121,7 +125,11 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_uinc_wrap_i8:
@@ -204,7 +212,12 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_uinc_wrap_i16:
@@ -284,7 +297,12 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_uinc_wrap_i16:
@@ -362,7 +380,11 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_uinc_wrap_i32:
@@ -422,7 +444,11 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_uinc_wrap_i32:
@@ -507,7 +533,12 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_uinc_wrap_i64:
@@ -562,7 +593,12 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    .cfi_restore ra
+; RV32IA-NEXT:    .cfi_restore s0
+; RV32IA-NEXT:    .cfi_restore s1
+; RV32IA-NEXT:    .cfi_restore s2
 ; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_uinc_wrap_i64:
@@ -597,7 +633,11 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_uinc_wrap_i64:
@@ -674,7 +714,12 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_udec_wrap_i8:
@@ -770,7 +815,12 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_udec_wrap_i8:
@@ -875,7 +925,13 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
+; RV32I-NEXT:    .cfi_restore s3
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_udec_wrap_i16:
@@ -977,7 +1033,13 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
+; RV64I-NEXT:    .cfi_restore s3
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_udec_wrap_i16:
@@ -1073,7 +1135,11 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_udec_wrap_i32:
@@ -1155,7 +1221,12 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_udec_wrap_i32:
@@ -1258,7 +1329,12 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_udec_wrap_i64:
@@ -1321,7 +1397,12 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
 ; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    .cfi_restore ra
+; RV32IA-NEXT:    .cfi_restore s0
+; RV32IA-NEXT:    .cfi_restore s1
+; RV32IA-NEXT:    .cfi_restore s2
 ; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_udec_wrap_i64:
@@ -1364,7 +1445,11 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: atomicrmw_udec_wrap_i64:
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
index ec77d54da116d3..8da0ec7a5f2d74 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
@@ -306,7 +306,21 @@ define void @relax_jal_spill_32() {
 ; CHECK-RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore ra
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
+; CHECK-RV32-NEXT:    .cfi_restore s2
+; CHECK-RV32-NEXT:    .cfi_restore s3
+; CHECK-RV32-NEXT:    .cfi_restore s4
+; CHECK-RV32-NEXT:    .cfi_restore s5
+; CHECK-RV32-NEXT:    .cfi_restore s6
+; CHECK-RV32-NEXT:    .cfi_restore s7
+; CHECK-RV32-NEXT:    .cfi_restore s8
+; CHECK-RV32-NEXT:    .cfi_restore s9
+; CHECK-RV32-NEXT:    .cfi_restore s10
+; CHECK-RV32-NEXT:    .cfi_restore s11
 ; CHECK-RV32-NEXT:    addi sp, sp, 64
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: relax_jal_spill_32:
@@ -534,7 +548,21 @@ define void @relax_jal_spill_32() {
 ; CHECK-RV64-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore ra
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
+; CHECK-RV64-NEXT:    .cfi_restore s2
+; CHECK-RV64-NEXT:    .cfi_restore s3
+; CHECK-RV64-NEXT:    .cfi_restore s4
+; CHECK-RV64-NEXT:    .cfi_restore s5
+; CHECK-RV64-NEXT:    .cfi_restore s6
+; CHECK-RV64-NEXT:    .cfi_restore s7
+; CHECK-RV64-NEXT:    .cfi_restore s8
+; CHECK-RV64-NEXT:    .cfi_restore s9
+; CHECK-RV64-NEXT:    .cfi_restore s10
+; CHECK-RV64-NEXT:    .cfi_restore s11
 ; CHECK-RV64-NEXT:    addi sp, sp, 128
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 
   %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
@@ -825,6 +853,7 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
 ; CHECK-RV32-NEXT:    # reg use t6
 ; CHECK-RV32-NEXT:    #NO_APP
 ; CHECK-RV32-NEXT:    addi sp, s0, -2032
+; CHECK-RV32-NEXT:    .cfi_def_cfa sp, 2032
 ; CHECK-RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
@@ -838,7 +867,21 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
 ; CHECK-RV32-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore ra
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
+; CHECK-RV32-NEXT:    .cfi_restore s2
+; CHECK-RV32-NEXT:    .cfi_restore s3
+; CHECK-RV32-NEXT:    .cfi_restore s4
+; CHECK-RV32-NEXT:    .cfi_restore s5
+; CHECK-RV32-NEXT:    .cfi_restore s6
+; CHECK-RV32-NEXT:    .cfi_restore s7
+; CHECK-RV32-NEXT:    .cfi_restore s8
+; CHECK-RV32-NEXT:    .cfi_restore s9
+; CHECK-RV32-NEXT:    .cfi_restore s10
+; CHECK-RV32-NEXT:    .cfi_restore s11
 ; CHECK-RV32-NEXT:    addi sp, sp, 2032
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: relax_jal_spill_32_adjust_spill_slot:
@@ -1071,6 +1114,7 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
 ; CHECK-RV64-NEXT:    # reg use t6
 ; CHECK-RV64-NEXT:    #NO_APP
 ; CHECK-RV64-NEXT:    addi sp, s0, -2032
+; CHECK-RV64-NEXT:    .cfi_def_cfa sp, 2032
 ; CHECK-RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
@@ -1084,7 +1128,21 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
 ; CHECK-RV64-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore ra
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
+; CHECK-RV64-NEXT:    .cfi_restore s2
+; CHECK-RV64-NEXT:    .cfi_restore s3
+; CHECK-RV64-NEXT:    .cfi_restore s4
+; CHECK-RV64-NEXT:    .cfi_restore s5
+; CHECK-RV64-NEXT:    .cfi_restore s6
+; CHECK-RV64-NEXT:    .cfi_restore s7
+; CHECK-RV64-NEXT:    .cfi_restore s8
+; CHECK-RV64-NEXT:    .cfi_restore s9
+; CHECK-RV64-NEXT:    .cfi_restore s10
+; CHECK-RV64-NEXT:    .cfi_restore s11
 ; CHECK-RV64-NEXT:    addi sp, sp, 2032
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 
   ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
@@ -1489,7 +1547,21 @@ define void @relax_jal_spill_64() {
 ; CHECK-RV32-NEXT:    lw s9, 228(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s10, 224(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s11, 220(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore ra
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
+; CHECK-RV32-NEXT:    .cfi_restore s2
+; CHECK-RV32-NEXT:    .cfi_restore s3
+; CHECK-RV32-NEXT:    .cfi_restore s4
+; CHECK-RV32-NEXT:    .cfi_restore s5
+; CHECK-RV32-NEXT:    .cfi_restore s6
+; CHECK-RV32-NEXT:    .cfi_restore s7
+; CHECK-RV32-NEXT:    .cfi_restore s8
+; CHECK-RV32-NEXT:    .cfi_restore s9
+; CHECK-RV32-NEXT:    .cfi_restore s10
+; CHECK-RV32-NEXT:    .cfi_restore s11
 ; CHECK-RV32-NEXT:    addi sp, sp, 272
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: relax_jal_spill_64:
@@ -1715,7 +1787,21 @@ define void @relax_jal_spill_64() {
 ; CHECK-RV64-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore ra
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
+; CHECK-RV64-NEXT:    .cfi_restore s2
+; CHECK-RV64-NEXT:    .cfi_restore s3
+; CHECK-RV64-NEXT:    .cfi_restore s4
+; CHECK-RV64-NEXT:    .cfi_restore s5
+; CHECK-RV64-NEXT:    .cfi_restore s6
+; CHECK-RV64-NEXT:    .cfi_restore s7
+; CHECK-RV64-NEXT:    .cfi_restore s8
+; CHECK-RV64-NEXT:    .cfi_restore s9
+; CHECK-RV64-NEXT:    .cfi_restore s10
+; CHECK-RV64-NEXT:    .cfi_restore s11
 ; CHECK-RV64-NEXT:    addi sp, sp, 112
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 
   %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
@@ -2318,6 +2404,7 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
 ; CHECK-RV32-NEXT:    # reg use t6
 ; CHECK-RV32-NEXT:    #NO_APP
 ; CHECK-RV32-NEXT:    addi sp, s0, -2032
+; CHECK-RV32-NEXT:    .cfi_def_cfa sp, 2032
 ; CHECK-RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
@@ -2331,7 +2418,21 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
 ; CHECK-RV32-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore ra
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
+; CHECK-RV32-NEXT:    .cfi_restore s2
+; CHECK-RV32-NEXT:    .cfi_restore s3
+; CHECK-RV32-NEXT:    .cfi_restore s4
+; CHECK-RV32-NEXT:    .cfi_restore s5
+; CHECK-RV32-NEXT:    .cfi_restore s6
+; CHECK-RV32-NEXT:    .cfi_restore s7
+; CHECK-RV32-NEXT:    .cfi_restore s8
+; CHECK-RV32-NEXT:    .cfi_restore s9
+; CHECK-RV32-NEXT:    .cfi_restore s10
+; CHECK-RV32-NEXT:    .cfi_restore s11
 ; CHECK-RV32-NEXT:    addi sp, sp, 2032
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: relax_jal_spill_64_adjust_spill_slot:
@@ -2552,6 +2653,7 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
 ; CHECK-RV64-NEXT:    # reg use t6
 ; CHECK-RV64-NEXT:    #NO_APP
 ; CHECK-RV64-NEXT:    addi sp, s0, -2032
+; CHECK-RV64-NEXT:    .cfi_def_cfa sp, 2032
 ; CHECK-RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
@@ -2565,7 +2667,21 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
 ; CHECK-RV64-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore ra
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
+; CHECK-RV64-NEXT:    .cfi_restore s2
+; CHECK-RV64-NEXT:    .cfi_restore s3
+; CHECK-RV64-NEXT:    .cfi_restore s4
+; CHECK-RV64-NEXT:    .cfi_restore s5
+; CHECK-RV64-NEXT:    .cfi_restore s6
+; CHECK-RV64-NEXT:    .cfi_restore s7
+; CHECK-RV64-NEXT:    .cfi_restore s8
+; CHECK-RV64-NEXT:    .cfi_restore s9
+; CHECK-RV64-NEXT:    .cfi_restore s10
+; CHECK-RV64-NEXT:    .cfi_restore s11
 ; CHECK-RV64-NEXT:    addi sp, sp, 2032
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 
   ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
@@ -2873,7 +2989,21 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore ra
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
+; CHECK-RV32-NEXT:    .cfi_restore s2
+; CHECK-RV32-NEXT:    .cfi_restore s3
+; CHECK-RV32-NEXT:    .cfi_restore s4
+; CHECK-RV32-NEXT:    .cfi_restore s5
+; CHECK-RV32-NEXT:    .cfi_restore s6
+; CHECK-RV32-NEXT:    .cfi_restore s7
+; CHECK-RV32-NEXT:    .cfi_restore s8
+; CHECK-RV32-NEXT:    .cfi_restore s9
+; CHECK-RV32-NEXT:    .cfi_restore s10
+; CHECK-RV32-NEXT:    .cfi_restore s11
 ; CHECK-RV32-NEXT:    addi sp, sp, 64
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ; CHECK-RV32-NEXT:  .LBB6_5: # %cond_3
 ; CHECK-RV32-NEXT:    beq t1, t2, .LBB6_4
@@ -3120,7 +3250,21 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV64-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore ra
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
+; CHECK-RV64-NEXT:    .cfi_restore s2
+; CHECK-RV64-NEXT:    .cfi_restore s3
+; CHECK-RV64-NEXT:    .cfi_restore s4
+; CHECK-RV64-NEXT:    .cfi_restore s5
+; CHECK-RV64-NEXT:    .cfi_restore s6
+; CHECK-RV64-NEXT:    .cfi_restore s7
+; CHECK-RV64-NEXT:    .cfi_restore s8
+; CHECK-RV64-NEXT:    .cfi_restore s9
+; CHECK-RV64-NEXT:    .cfi_restore s10
+; CHECK-RV64-NEXT:    .cfi_restore s11
 ; CHECK-RV64-NEXT:    addi sp, sp, 128
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 ; CHECK-RV64-NEXT:  .LBB6_5: # %cond_3
 ; CHECK-RV64-NEXT:    sext.w t5, t2
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index 6d2263f74062df..2a26602de9e1e7 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -432,7 +432,8 @@ define void @callee() nounwind {
 ; RV32IZCMP-NEXT:    sw a0, %lo(var+4)(t0)
 ; RV32IZCMP-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
 ; RV32IZCMP-NEXT:    sw a0, %lo(var)(t0)
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s11}, 96
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s11}, 96
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-WITH-FP-LABEL: callee:
 ; RV32IZCMP-WITH-FP:       # %bb.0:
@@ -941,7 +942,8 @@ define void @callee() nounwind {
 ; RV64IZCMP-NEXT:    sw a0, %lo(var+4)(t0)
 ; RV64IZCMP-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
 ; RV64IZCMP-NEXT:    sw a0, %lo(var)(t0)
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s11}, 160
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-WITH-FP-LABEL: callee:
 ; RV64IZCMP-WITH-FP:       # %bb.0:
@@ -1611,7 +1613,8 @@ define void @caller() nounwind {
 ; RV32IZCMP-NEXT:    lw a0, 92(sp) # 4-byte Folded Reload
 ; RV32IZCMP-NEXT:    sw a0, %lo(var)(s0)
 ; RV32IZCMP-NEXT:    addi sp, sp, 48
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s11}, 112
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s11}, 112
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-WITH-FP-LABEL: caller:
 ; RV32IZCMP-WITH-FP:       # %bb.0:
@@ -2306,7 +2309,8 @@ define void @caller() nounwind {
 ; RV64IZCMP-NEXT:    ld a0, 168(sp) # 8-byte Folded Reload
 ; RV64IZCMP-NEXT:    sw a0, %lo(var)(s0)
 ; RV64IZCMP-NEXT:    addi sp, sp, 128
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s11}, 160
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-WITH-FP-LABEL: caller:
 ; RV64IZCMP-WITH-FP:       # %bb.0:
@@ -2472,7 +2476,9 @@ define void @foo() {
 ; RV32I-NEXT:    li s4, 0
 ; RV32I-NEXT:    #NO_APP
 ; RV32I-NEXT:    lw s4, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore s4
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: foo:
@@ -2500,7 +2506,11 @@ define void @foo() {
 ; RV32I-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    lw s4, 4(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    .cfi_restore ra
+; RV32I-WITH-FP-NEXT:    .cfi_restore s0
+; RV32I-WITH-FP-NEXT:    .cfi_restore s4
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, 16
+; RV32I-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-WITH-FP-NEXT:    ret
 ;
 ; RV32IZCMP-LABEL: foo:
@@ -2511,7 +2521,10 @@ define void @foo() {
 ; RV32IZCMP-NEXT:    #APP
 ; RV32IZCMP-NEXT:    li s4, 0
 ; RV32IZCMP-NEXT:    #NO_APP
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s4}, 32
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s4}, 32
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore s4
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-WITH-FP-LABEL: foo:
 ; RV32IZCMP-WITH-FP:       # %bb.0: # %entry
@@ -2531,7 +2544,11 @@ define void @foo() {
 ; RV32IZCMP-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZCMP-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IZCMP-WITH-FP-NEXT:    lw s4, 4(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore s0
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore s4
 ; RV32IZCMP-WITH-FP-NEXT:    addi sp, sp, 16
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZCMP-WITH-FP-NEXT:    ret
 ;
 ; RV64I-LABEL: foo:
@@ -2544,7 +2561,9 @@ define void @foo() {
 ; RV64I-NEXT:    li s4, 0
 ; RV64I-NEXT:    #NO_APP
 ; RV64I-NEXT:    ld s4, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore s4
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: foo:
@@ -2572,7 +2591,11 @@ define void @foo() {
 ; RV64I-WITH-FP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    ld s4, 8(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    .cfi_restore ra
+; RV64I-WITH-FP-NEXT:    .cfi_restore s0
+; RV64I-WITH-FP-NEXT:    .cfi_restore s4
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, 32
+; RV64I-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-WITH-FP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: foo:
@@ -2583,7 +2606,10 @@ define void @foo() {
 ; RV64IZCMP-NEXT:    #APP
 ; RV64IZCMP-NEXT:    li s4, 0
 ; RV64IZCMP-NEXT:    #NO_APP
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s4}, 48
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s4}, 48
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore s4
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-WITH-FP-LABEL: foo:
 ; RV64IZCMP-WITH-FP:       # %bb.0: # %entry
@@ -2603,7 +2629,11 @@ define void @foo() {
 ; RV64IZCMP-WITH-FP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IZCMP-WITH-FP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64IZCMP-WITH-FP-NEXT:    ld s4, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore s0
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore s4
 ; RV64IZCMP-WITH-FP-NEXT:    addi sp, sp, 32
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IZCMP-WITH-FP-NEXT:    ret
 entry:
   tail call void asm sideeffect "li s4, 0", "~{s4}"()
@@ -2622,7 +2652,9 @@ define void @bar() {
 ; RV32I-NEXT:    li s11, 0
 ; RV32I-NEXT:    #NO_APP
 ; RV32I-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore s11
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: bar:
@@ -2650,7 +2682,11 @@ define void @bar() {
 ; RV32I-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    lw s11, 4(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    .cfi_restore ra
+; RV32I-WITH-FP-NEXT:    .cfi_restore s0
+; RV32I-WITH-FP-NEXT:    .cfi_restore s11
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, 16
+; RV32I-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-WITH-FP-NEXT:    ret
 ;
 ; RV32IZCMP-LABEL: bar:
@@ -2661,7 +2697,10 @@ define void @bar() {
 ; RV32IZCMP-NEXT:    #APP
 ; RV32IZCMP-NEXT:    li s11, 0
 ; RV32IZCMP-NEXT:    #NO_APP
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s11}, 64
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s11}, 64
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore s11
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-WITH-FP-LABEL: bar:
 ; RV32IZCMP-WITH-FP:       # %bb.0: # %entry
@@ -2681,7 +2720,11 @@ define void @bar() {
 ; RV32IZCMP-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZCMP-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IZCMP-WITH-FP-NEXT:    lw s11, 4(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore s0
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore s11
 ; RV32IZCMP-WITH-FP-NEXT:    addi sp, sp, 16
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZCMP-WITH-FP-NEXT:    ret
 ;
 ; RV64I-LABEL: bar:
@@ -2694,7 +2737,9 @@ define void @bar() {
 ; RV64I-NEXT:    li s11, 0
 ; RV64I-NEXT:    #NO_APP
 ; RV64I-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore s11
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: bar:
@@ -2722,7 +2767,11 @@ define void @bar() {
 ; RV64I-WITH-FP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    .cfi_restore ra
+; RV64I-WITH-FP-NEXT:    .cfi_restore s0
+; RV64I-WITH-FP-NEXT:    .cfi_restore s11
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, 32
+; RV64I-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-WITH-FP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: bar:
@@ -2733,7 +2782,10 @@ define void @bar() {
 ; RV64IZCMP-NEXT:    #APP
 ; RV64IZCMP-NEXT:    li s11, 0
 ; RV64IZCMP-NEXT:    #NO_APP
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s11}, 112
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s11}, 112
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore s11
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-WITH-FP-LABEL: bar:
 ; RV64IZCMP-WITH-FP:       # %bb.0: # %entry
@@ -2753,7 +2805,11 @@ define void @bar() {
 ; RV64IZCMP-WITH-FP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IZCMP-WITH-FP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64IZCMP-WITH-FP-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore s0
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore s11
 ; RV64IZCMP-WITH-FP-NEXT:    addi sp, sp, 32
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IZCMP-WITH-FP-NEXT:    ret
 entry:
   tail call void asm sideeffect "li s11, 0", "~{s11}"()
@@ -2777,7 +2833,9 @@ define void @varargs(...) {
 ; RV32I-NEXT:    sw a0, 16(sp)
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: varargs:
@@ -2794,7 +2852,9 @@ define void @varargs(...) {
 ; RV32I-ILP32E-NEXT:    sw a0, 4(sp)
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 28
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV32I-WITH-FP-LABEL: varargs:
@@ -2818,7 +2878,10 @@ define void @varargs(...) {
 ; RV32I-WITH-FP-NEXT:    call callee
 ; RV32I-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    .cfi_restore ra
+; RV32I-WITH-FP-NEXT:    .cfi_restore s0
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, 48
+; RV32I-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-WITH-FP-NEXT:    ret
 ;
 ; RV32IZCMP-LABEL: varargs:
@@ -2837,7 +2900,9 @@ define void @varargs(...) {
 ; RV32IZCMP-NEXT:    sw a0, 16(sp)
 ; RV32IZCMP-NEXT:    call callee
 ; RV32IZCMP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT:    .cfi_restore ra
 ; RV32IZCMP-NEXT:    addi sp, sp, 48
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-WITH-FP-LABEL: varargs:
@@ -2861,7 +2926,10 @@ define void @varargs(...) {
 ; RV32IZCMP-WITH-FP-NEXT:    call callee
 ; RV32IZCMP-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZCMP-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_restore s0
 ; RV32IZCMP-WITH-FP-NEXT:    addi sp, sp, 48
+; RV32IZCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZCMP-WITH-FP-NEXT:    ret
 ;
 ; RV64I-LABEL: varargs:
@@ -2880,7 +2948,9 @@ define void @varargs(...) {
 ; RV64I-NEXT:    sd a0, 16(sp)
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 80
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: varargs:
@@ -2897,7 +2967,9 @@ define void @varargs(...) {
 ; RV64I-LP64E-NEXT:    sd a0, 8(sp)
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 0(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 56
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
 ;
 ; RV64I-WITH-FP-LABEL: varargs:
@@ -2921,7 +2993,10 @@ define void @varargs(...) {
 ; RV64I-WITH-FP-NEXT:    call callee
 ; RV64I-WITH-FP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    .cfi_restore ra
+; RV64I-WITH-FP-NEXT:    .cfi_restore s0
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, 80
+; RV64I-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-WITH-FP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: varargs:
@@ -2940,7 +3015,9 @@ define void @varargs(...) {
 ; RV64IZCMP-NEXT:    sd a0, 16(sp)
 ; RV64IZCMP-NEXT:    call callee
 ; RV64IZCMP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT:    .cfi_restore ra
 ; RV64IZCMP-NEXT:    addi sp, sp, 80
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-WITH-FP-LABEL: varargs:
@@ -2964,7 +3041,10 @@ define void @varargs(...) {
 ; RV64IZCMP-WITH-FP-NEXT:    call callee
 ; RV64IZCMP-WITH-FP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZCMP-WITH-FP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_restore s0
 ; RV64IZCMP-WITH-FP-NEXT:    addi sp, sp, 80
+; RV64IZCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IZCMP-WITH-FP-NEXT:    ret
   call void @callee()
   ret void
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
index 708cb00d1c45c6..deaf32c54dacce 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
@@ -27,7 +27,10 @@ define i32 @callee_float_in_regs(i32 %a, float %b) {
 ; ILP32E-FPELIM-NEXT:    add a0, s0, a0
 ; ILP32E-FPELIM-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 8
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: callee_float_in_regs:
@@ -49,7 +52,11 @@ define i32 @callee_float_in_regs(i32 %a, float %b) {
 ; ILP32E-WITHFP-NEXT:    lw ra, 8(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s1, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
+; ILP32E-WITHFP-NEXT:    .cfi_restore s1
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 12
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_float_in_regs:
@@ -62,6 +69,8 @@ define i32 @callee_float_in_regs(i32 %a, float %b) {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    mv a0, a1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call __fixsfsi
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, s0, a0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: callee_float_in_regs:
@@ -77,6 +86,9 @@ define i32 @callee_float_in_regs(i32 %a, float %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    mv a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call __fixsfsi
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, s1, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_2
   %b_fptosi = fptosi float %b to i32
   %1 = add i32 %a, %b_fptosi
@@ -94,7 +106,9 @@ define i32 @caller_float_in_regs() {
 ; ILP32E-FPELIM-NEXT:    lui a1, 262144
 ; ILP32E-FPELIM-NEXT:    call callee_float_in_regs
 ; ILP32E-FPELIM-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 4
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_float_in_regs:
@@ -112,7 +126,10 @@ define i32 @caller_float_in_regs() {
 ; ILP32E-WITHFP-NEXT:    call callee_float_in_regs
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_float_in_regs:
@@ -123,6 +140,7 @@ define i32 @caller_float_in_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    lui a1, 262144
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_float_in_regs
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_float_in_regs:
@@ -136,6 +154,8 @@ define i32 @caller_float_in_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a1, 262144
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_float_in_regs
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_float_in_regs(i32 1, float 2.0)
   ret i32 %1
@@ -164,7 +184,10 @@ define i32 @callee_float_on_stack(i64 %a, i64 %b, i64 %c, i64 %d, float %e) {
 ; ILP32E-WITHFP-NEXT:    add a0, a1, a0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_float_on_stack:
@@ -185,6 +208,8 @@ define i32 @callee_float_on_stack(i64 %a, i64 %b, i64 %c, i64 %d, float %e) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a0, 8(s0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a1, 0(s0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a1, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = trunc i64 %d to i32
   %2 = bitcast float %e to i32
@@ -212,7 +237,9 @@ define i32 @caller_float_on_stack() {
 ; ILP32E-FPELIM-NEXT:    li a5, 0
 ; ILP32E-FPELIM-NEXT:    call callee_float_on_stack
 ; ILP32E-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 16
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_float_on_stack:
@@ -239,7 +266,10 @@ define i32 @caller_float_on_stack() {
 ; ILP32E-WITHFP-NEXT:    call callee_float_on_stack
 ; ILP32E-WITHFP-NEXT:    lw ra, 16(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 20
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_float_on_stack:
@@ -260,7 +290,9 @@ define i32 @caller_float_on_stack() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a3, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a5, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_float_on_stack
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 12
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_float_on_stack:
@@ -284,7 +316,10 @@ define i32 @caller_float_on_stack() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a3, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a5, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_float_on_stack
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 12
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_float_on_stack(i64 1, i64 2, i64 3, i64 4, float 5.0)
   ret i32 %1
@@ -309,7 +344,10 @@ define float @callee_tiny_scalar_ret() {
 ; ILP32E-WITHFP-NEXT:    lui a0, 260096
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_tiny_scalar_ret:
@@ -326,6 +364,8 @@ define float @callee_tiny_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a0, 260096
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret float 1.0
 }
@@ -339,7 +379,9 @@ define i32 @caller_tiny_scalar_ret() {
 ; ILP32E-FPELIM-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-NEXT:    call callee_tiny_scalar_ret
 ; ILP32E-FPELIM-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 4
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_tiny_scalar_ret:
@@ -355,7 +397,10 @@ define i32 @caller_tiny_scalar_ret() {
 ; ILP32E-WITHFP-NEXT:    call callee_tiny_scalar_ret
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_tiny_scalar_ret:
@@ -364,6 +409,7 @@ define i32 @caller_tiny_scalar_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_tiny_scalar_ret
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_tiny_scalar_ret:
@@ -375,6 +421,8 @@ define i32 @caller_tiny_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_tiny_scalar_ret
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call float @callee_tiny_scalar_ret()
   %2 = bitcast float %1 to i32
@@ -400,7 +448,10 @@ define i32 @callee_double_in_regs(i32 %a, double %b) {
 ; ILP32E-FPELIM-NEXT:    add a0, s0, a0
 ; ILP32E-FPELIM-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 8
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: callee_double_in_regs:
@@ -423,7 +474,11 @@ define i32 @callee_double_in_regs(i32 %a, double %b) {
 ; ILP32E-WITHFP-NEXT:    lw ra, 8(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s1, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
+; ILP32E-WITHFP-NEXT:    .cfi_restore s1
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 12
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_double_in_regs:
@@ -437,6 +492,8 @@ define i32 @callee_double_in_regs(i32 %a, double %b) {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    mv a1, a2
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call __fixdfsi
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, s0, a0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: callee_double_in_regs:
@@ -453,6 +510,9 @@ define i32 @callee_double_in_regs(i32 %a, double %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    mv a1, a2
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call __fixdfsi
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, s1, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_2
   %b_fptosi = fptosi double %b to i32
   %1 = add i32 %a, %b_fptosi
@@ -471,7 +531,9 @@ define i32 @caller_double_in_regs() {
 ; ILP32E-FPELIM-NEXT:    li a1, 0
 ; ILP32E-FPELIM-NEXT:    call callee_double_in_regs
 ; ILP32E-FPELIM-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 4
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_double_in_regs:
@@ -490,7 +552,10 @@ define i32 @caller_double_in_regs() {
 ; ILP32E-WITHFP-NEXT:    call callee_double_in_regs
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_double_in_regs:
@@ -502,6 +567,7 @@ define i32 @caller_double_in_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    lui a2, 262144
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a1, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_double_in_regs
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_double_in_regs:
@@ -516,6 +582,8 @@ define i32 @caller_double_in_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a2, 262144
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_double_in_regs
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_double_in_regs(i32 1, double 2.0)
   ret i32 %1
@@ -564,7 +632,10 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; ILP32E-WITHFP-NEXT:    add a0, a0, a4
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_aligned_stack:
@@ -601,6 +672,8 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a4, a5, a4
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a4
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = bitcast fp128 %c to i128
   %2 = trunc i128 %1 to i32
@@ -668,9 +741,13 @@ define void @caller_aligned_stack() {
 ; ILP32E-FPELIM-NEXT:    sw a6, 32(sp)
 ; ILP32E-FPELIM-NEXT:    call callee_aligned_stack
 ; ILP32E-FPELIM-NEXT:    addi sp, s0, -64
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa sp, 64
 ; ILP32E-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 64
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_aligned_stack:
@@ -723,9 +800,13 @@ define void @caller_aligned_stack() {
 ; ILP32E-WITHFP-NEXT:    sw a6, 32(sp)
 ; ILP32E-WITHFP-NEXT:    call callee_aligned_stack
 ; ILP32E-WITHFP-NEXT:    addi sp, s0, -64
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa sp, 64
 ; ILP32E-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 64
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_aligned_stack:
@@ -777,7 +858,11 @@ define void @caller_aligned_stack() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    sw a6, 32(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_aligned_stack
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -64
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_aligned_stack:
@@ -829,7 +914,11 @@ define void @caller_aligned_stack() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a6, 32(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_aligned_stack
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -64
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_aligned_stack(i32 1, i32 11,
     fp128 0xLEB851EB851EB851F400091EB851EB851, i32 12, i32 13,
@@ -859,7 +948,10 @@ define double @callee_small_scalar_ret() {
 ; ILP32E-WITHFP-NEXT:    li a0, 0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_small_scalar_ret:
@@ -878,6 +970,8 @@ define double @callee_small_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a1, 261888
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret double 1.0
 }
@@ -891,7 +985,9 @@ define i64 @caller_small_scalar_ret() {
 ; ILP32E-FPELIM-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-NEXT:    call callee_small_scalar_ret
 ; ILP32E-FPELIM-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 4
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_small_scalar_ret:
@@ -907,7 +1003,10 @@ define i64 @caller_small_scalar_ret() {
 ; ILP32E-WITHFP-NEXT:    call callee_small_scalar_ret
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_small_scalar_ret:
@@ -916,6 +1015,7 @@ define i64 @caller_small_scalar_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_small_scalar_ret
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_small_scalar_ret:
@@ -927,6 +1027,8 @@ define i64 @caller_small_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_small_scalar_ret
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call double @callee_small_scalar_ret()
   %2 = bitcast double %1 to i64
@@ -955,7 +1057,10 @@ define i32 @callee_i64_in_regs(i32 %a, i64 %b) {
 ; ILP32E-WITHFP-NEXT:    add a0, a0, a1
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_i64_in_regs:
@@ -972,6 +1077,8 @@ define i32 @callee_i64_in_regs(i32 %a, i64 %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %b_trunc = trunc i64 %b to i32
   %1 = add i32 %a, %b_trunc
@@ -990,7 +1097,9 @@ define i32 @caller_i64_in_regs() {
 ; ILP32E-FPELIM-NEXT:    li a2, 0
 ; ILP32E-FPELIM-NEXT:    call callee_i64_in_regs
 ; ILP32E-FPELIM-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 4
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_i64_in_regs:
@@ -1009,7 +1118,10 @@ define i32 @caller_i64_in_regs() {
 ; ILP32E-WITHFP-NEXT:    call callee_i64_in_regs
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_i64_in_regs:
@@ -1021,6 +1133,7 @@ define i32 @caller_i64_in_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a2, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_i64_in_regs
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_i64_in_regs:
@@ -1035,6 +1148,8 @@ define i32 @caller_i64_in_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a2, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_i64_in_regs
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_i64_in_regs(i32 1, i64 2)
   ret i32 %1
@@ -1093,7 +1208,10 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
 ; ILP32E-WITHFP-NEXT:    add a0, a1, a0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_many_scalars:
@@ -1142,6 +1260,8 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a7
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a6
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a1, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %a_ext = zext i8 %a to i32
   %b_ext = zext i16 %b to i32
@@ -1178,7 +1298,9 @@ define i32 @caller_many_scalars() {
 ; ILP32E-FPELIM-NEXT:    li a4, 0
 ; ILP32E-FPELIM-NEXT:    call callee_many_scalars
 ; ILP32E-FPELIM-NEXT:    lw ra, 16(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 20
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_many_scalars:
@@ -1207,7 +1329,10 @@ define i32 @caller_many_scalars() {
 ; ILP32E-WITHFP-NEXT:    call callee_many_scalars
 ; ILP32E-WITHFP-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 24
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_many_scalars:
@@ -1230,7 +1355,9 @@ define i32 @caller_many_scalars() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    sw a4, 0(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a4, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_many_scalars
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 16
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_many_scalars:
@@ -1256,7 +1383,10 @@ define i32 @caller_many_scalars() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a4, 0(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a4, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_many_scalars
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 16
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i32 5, i32 6, i64 7, i32 8)
   ret i32 %1
@@ -1313,7 +1443,10 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) {
 ; ILP32E-WITHFP-NEXT:    seqz a0, a0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_large_scalars:
@@ -1360,6 +1493,8 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a1, a0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a0, a3
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    seqz a0, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %b_bitcast = bitcast fp128 %b to i128
   %1 = icmp eq i128 %a, %b_bitcast
@@ -1393,9 +1528,13 @@ define i32 @caller_large_scalars() {
 ; ILP32E-FPELIM-NEXT:    sw a2, 24(sp)
 ; ILP32E-FPELIM-NEXT:    call callee_large_scalars
 ; ILP32E-FPELIM-NEXT:    addi sp, s0, -48
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa sp, 48
 ; ILP32E-FPELIM-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 48
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_large_scalars:
@@ -1423,9 +1562,13 @@ define i32 @caller_large_scalars() {
 ; ILP32E-WITHFP-NEXT:    sw a2, 24(sp)
 ; ILP32E-WITHFP-NEXT:    call callee_large_scalars
 ; ILP32E-WITHFP-NEXT:    addi sp, s0, -48
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa sp, 48
 ; ILP32E-WITHFP-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 48
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_scalars:
@@ -1452,7 +1595,11 @@ define i32 @caller_large_scalars() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    sw a2, 24(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_scalars
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -48
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 48
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 40
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalars:
@@ -1479,7 +1626,11 @@ define i32 @caller_large_scalars() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a2, 24(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_scalars
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -48
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 48
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 40
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_large_scalars(i128 1, fp128 0xL00000000000000007FFF000000000000)
   ret i32 %1
@@ -1542,7 +1693,10 @@ define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d,
 ; ILP32E-WITHFP-NEXT:    seqz a0, a0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_large_scalars_exhausted_regs:
@@ -1593,6 +1747,8 @@ define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d,
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a1, a0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a0, a3
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    seqz a0, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %j_bitcast = bitcast fp128 %j to i128
   %1 = icmp eq i128 %h, %j_bitcast
@@ -1638,9 +1794,13 @@ define i32 @caller_large_scalars_exhausted_regs() {
 ; ILP32E-FPELIM-NEXT:    sw zero, 44(sp)
 ; ILP32E-FPELIM-NEXT:    call callee_large_scalars_exhausted_regs
 ; ILP32E-FPELIM-NEXT:    addi sp, s0, -64
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa sp, 64
 ; ILP32E-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 64
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_large_scalars_exhausted_regs:
@@ -1680,9 +1840,13 @@ define i32 @caller_large_scalars_exhausted_regs() {
 ; ILP32E-WITHFP-NEXT:    sw zero, 44(sp)
 ; ILP32E-WITHFP-NEXT:    call callee_large_scalars_exhausted_regs
 ; ILP32E-WITHFP-NEXT:    addi sp, s0, -64
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa sp, 64
 ; ILP32E-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 64
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_scalars_exhausted_regs:
@@ -1721,7 +1885,11 @@ define i32 @caller_large_scalars_exhausted_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    sw zero, 44(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_scalars_exhausted_regs
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -64
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 56
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalars_exhausted_regs:
@@ -1760,7 +1928,11 @@ define i32 @caller_large_scalars_exhausted_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw zero, 44(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_scalars_exhausted_regs
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -64
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 56
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_large_scalars_exhausted_regs(
       i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 8, i32 9,
@@ -1788,9 +1960,13 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) {
 ; ILP32E-FPELIM-NEXT:    call __floatditf
 ; ILP32E-FPELIM-NEXT:    lw a0, 0(sp)
 ; ILP32E-FPELIM-NEXT:    addi sp, s0, -24
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa sp, 24
 ; ILP32E-FPELIM-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 24
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_mixed_scalar_libcalls:
@@ -1810,9 +1986,13 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) {
 ; ILP32E-WITHFP-NEXT:    call __floatditf
 ; ILP32E-WITHFP-NEXT:    lw a0, 0(sp)
 ; ILP32E-WITHFP-NEXT:    addi sp, s0, -24
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa sp, 24
 ; ILP32E-WITHFP-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 24
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_mixed_scalar_libcalls:
@@ -1831,7 +2011,11 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call __floatditf
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    lw a0, 0(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 16
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_mixed_scalar_libcalls:
@@ -1850,7 +2034,11 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call __floatditf
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a0, 0(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 16
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = sitofp i64 %a to fp128
   %2 = bitcast fp128 %1 to i128
@@ -1884,7 +2072,10 @@ define i32 @callee_small_coerced_struct([2 x i32] %a.coerce) {
 ; ILP32E-WITHFP-NEXT:    seqz a0, a0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_small_coerced_struct:
@@ -1903,6 +2094,8 @@ define i32 @callee_small_coerced_struct([2 x i32] %a.coerce) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    xor a0, a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    seqz a0, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = extractvalue [2 x i32] %a.coerce, 0
   %2 = extractvalue [2 x i32] %a.coerce, 1
@@ -1922,7 +2115,9 @@ define i32 @caller_small_coerced_struct() {
 ; ILP32E-FPELIM-NEXT:    li a1, 2
 ; ILP32E-FPELIM-NEXT:    call callee_small_coerced_struct
 ; ILP32E-FPELIM-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 4
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_small_coerced_struct:
@@ -1940,7 +2135,10 @@ define i32 @caller_small_coerced_struct() {
 ; ILP32E-WITHFP-NEXT:    call callee_small_coerced_struct
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_small_coerced_struct:
@@ -1951,6 +2149,7 @@ define i32 @caller_small_coerced_struct() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_small_coerced_struct
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_small_coerced_struct:
@@ -1964,6 +2163,8 @@ define i32 @caller_small_coerced_struct() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_small_coerced_struct
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_small_coerced_struct([2 x i32] [i32 1, i32 2])
   ret i32 %1
@@ -1996,7 +2197,10 @@ define i32 @callee_large_struct(ptr byval(%struct.large) align 4 %a) {
 ; ILP32E-WITHFP-NEXT:    add a0, a1, a0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_large_struct:
@@ -2017,6 +2221,8 @@ define i32 @callee_large_struct(ptr byval(%struct.large) align 4 %a) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a1, 0(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a0, 12(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a1, a0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = getelementptr inbounds %struct.large, ptr %a, i32 0, i32 0
   %2 = getelementptr inbounds %struct.large, ptr %a, i32 0, i32 3
@@ -2048,7 +2254,9 @@ define i32 @caller_large_struct() {
 ; ILP32E-FPELIM-NEXT:    mv a0, sp
 ; ILP32E-FPELIM-NEXT:    call callee_large_struct
 ; ILP32E-FPELIM-NEXT:    lw ra, 32(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 36
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_large_struct:
@@ -2077,7 +2285,10 @@ define i32 @caller_large_struct() {
 ; ILP32E-WITHFP-NEXT:    call callee_large_struct
 ; ILP32E-WITHFP-NEXT:    lw ra, 36(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 32(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 40
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_struct:
@@ -2100,7 +2311,9 @@ define i32 @caller_large_struct() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    sw a3, 12(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    mv a0, sp
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_struct
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 32
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_struct:
@@ -2126,7 +2339,10 @@ define i32 @caller_large_struct() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a3, -28(s0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi a0, s0, -40
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_struct
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 32
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %ls = alloca %struct.large, align 4
   %1 = bitcast ptr %ls to ptr
@@ -2165,7 +2381,10 @@ define %struct.small @callee_small_struct_ret() {
 ; ILP32E-WITHFP-NEXT:    li a1, 0
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_small_struct_ret:
@@ -2184,6 +2403,8 @@ define %struct.small @callee_small_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret %struct.small { i32 1, ptr null }
 }
@@ -2198,7 +2419,9 @@ define i32 @caller_small_struct_ret() {
 ; ILP32E-FPELIM-NEXT:    call callee_small_struct_ret
 ; ILP32E-FPELIM-NEXT:    add a0, a0, a1
 ; ILP32E-FPELIM-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 4
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_small_struct_ret:
@@ -2215,7 +2438,10 @@ define i32 @caller_small_struct_ret() {
 ; ILP32E-WITHFP-NEXT:    add a0, a0, a1
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_small_struct_ret:
@@ -2225,6 +2451,7 @@ define i32 @caller_small_struct_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_small_struct_ret
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, a0, a1
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_small_struct_ret:
@@ -2237,6 +2464,8 @@ define i32 @caller_small_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_small_struct_ret
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call %struct.small @callee_small_struct_ret()
   %2 = extractvalue %struct.small %1, 0
@@ -2275,7 +2504,10 @@ define fp128 @callee_large_scalar_ret() {
 ; ILP32E-WITHFP-NEXT:    sw zero, 0(a0)
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_large_scalar_ret:
@@ -2300,6 +2532,8 @@ define fp128 @callee_large_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw zero, 8(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw zero, 4(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw zero, 0(a0)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret fp128 0xL00000000000000007FFF000000000000
 }
@@ -2319,9 +2553,13 @@ define void @caller_large_scalar_ret() {
 ; ILP32E-FPELIM-NEXT:    mv a0, sp
 ; ILP32E-FPELIM-NEXT:    call callee_large_scalar_ret
 ; ILP32E-FPELIM-NEXT:    addi sp, s0, -32
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa sp, 32
 ; ILP32E-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 32
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_large_scalar_ret:
@@ -2338,9 +2576,13 @@ define void @caller_large_scalar_ret() {
 ; ILP32E-WITHFP-NEXT:    mv a0, sp
 ; ILP32E-WITHFP-NEXT:    call callee_large_scalar_ret
 ; ILP32E-WITHFP-NEXT:    addi sp, s0, -32
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa sp, 32
 ; ILP32E-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 32
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_scalar_ret:
@@ -2356,7 +2598,11 @@ define void @caller_large_scalar_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    mv a0, sp
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_scalar_ret
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -32
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 32
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalar_ret:
@@ -2372,7 +2618,11 @@ define void @caller_large_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    mv a0, sp
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_scalar_ret
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -32
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 32
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call fp128 @callee_large_scalar_ret()
   ret void
@@ -2413,7 +2663,10 @@ define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result
 ; ILP32E-WITHFP-NEXT:    sw a1, 12(a0)
 ; ILP32E-WITHFP-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 8
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_large_struct_ret:
@@ -2444,6 +2697,8 @@ define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a1, 8(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 4
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a1, 12(a0)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %a = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 0
   store i32 1, ptr %a, align 4
@@ -2474,9 +2729,13 @@ define i32 @caller_large_struct_ret() {
 ; ILP32E-FPELIM-NEXT:    lw a1, 12(sp)
 ; ILP32E-FPELIM-NEXT:    add a0, a0, a1
 ; ILP32E-FPELIM-NEXT:    addi sp, s0, -24
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa sp, 24
 ; ILP32E-FPELIM-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
 ; ILP32E-FPELIM-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; ILP32E-FPELIM-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 24
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller_large_struct_ret:
@@ -2496,9 +2755,13 @@ define i32 @caller_large_struct_ret() {
 ; ILP32E-WITHFP-NEXT:    lw a1, 12(sp)
 ; ILP32E-WITHFP-NEXT:    add a0, a0, a1
 ; ILP32E-WITHFP-NEXT:    addi sp, s0, -24
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa sp, 24
 ; ILP32E-WITHFP-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 24
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; ILP32E-FPELIM-SAVE-RESTORE-LABEL: caller_large_struct_ret:
@@ -2517,7 +2780,11 @@ define i32 @caller_large_struct_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    lw a1, 12(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, a0, a1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 16
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_struct_ret:
@@ -2536,7 +2803,11 @@ define i32 @caller_large_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a1, 12(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 16
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = alloca %struct.large
   call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
diff --git a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
index 2103c3e60b591c..647c27752b1ed6 100644
--- a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
+++ b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
@@ -43,7 +43,8 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK32ZCMP-NEXT:    cm.mva01s s1, s0
 ; CHECK32ZCMP-NEXT:    call func
 ; CHECK32ZCMP-NEXT:    add a0, s2, s0
-; CHECK32ZCMP-NEXT:    cm.popret {ra, s0-s2}, 16
+; CHECK32ZCMP-NEXT:    cm.pop {ra, s0-s2}, 16
+; CHECK32ZCMP-NEXT:    ret
 ;
 ; CHECK64I-LABEL: zcmp_mv:
 ; CHECK64I:       # %bb.0:
@@ -76,7 +77,8 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK64ZCMP-NEXT:    cm.mva01s s1, s0
 ; CHECK64ZCMP-NEXT:    call func
 ; CHECK64ZCMP-NEXT:    addw a0, s2, s0
-; CHECK64ZCMP-NEXT:    cm.popret {ra, s0-s2}, 32
+; CHECK64ZCMP-NEXT:    cm.pop {ra, s0-s2}, 32
+; CHECK64ZCMP-NEXT:    ret
   %call = call i32 @func(i32 %num, i32 %f)
   %call1 = call i32 @func(i32 %num, i32 %f)
   %res = add i32 %call, %f
@@ -119,7 +121,8 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK32ZCMP-NEXT:    li a0, 1
 ; CHECK32ZCMP-NEXT:    mv a1, s0
 ; CHECK32ZCMP-NEXT:    call func
-; CHECK32ZCMP-NEXT:    cm.popret {ra, s0-s1}, 16
+; CHECK32ZCMP-NEXT:    cm.pop {ra, s0-s1}, 16
+; CHECK32ZCMP-NEXT:    ret
 ;
 ; CHECK64I-LABEL: not_zcmp_mv:
 ; CHECK64I:       # %bb.0:
@@ -156,7 +159,8 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
 ; CHECK64ZCMP-NEXT:    li a0, 1
 ; CHECK64ZCMP-NEXT:    mv a1, s0
 ; CHECK64ZCMP-NEXT:    call func
-; CHECK64ZCMP-NEXT:    cm.popret {ra, s0-s1}, 32
+; CHECK64ZCMP-NEXT:    cm.pop {ra, s0-s1}, 32
+; CHECK64ZCMP-NEXT:    ret
   %call = call i32 @foo(i32 %num)
   %call1 = call i32 @foo(i32 %f)
   %tmp = call i32 @foo(i32 %call)
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index ea4af4cb60cd3d..bf21ee6696a282 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -1571,7 +1571,9 @@ define double @maximumnum_double(double %x, double %y) {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    call fmaximum_num
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: maximumnum_double:
@@ -1582,7 +1584,9 @@ define double @maximumnum_double(double %x, double %y) {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    call fmaximum_num
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %z = call double @llvm.maximumnum.f64(double %x, double %y)
   ret double %z
@@ -1614,7 +1618,9 @@ define double @minimumnum_double(double %x, double %y) {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    call fminimum_num
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: minimumnum_double:
@@ -1625,7 +1631,9 @@ define double @minimumnum_double(double %x, double %y) {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    call fminimum_num
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %z = call double @llvm.minimumnum.f64(double %x, double %y)
   ret double %z
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 12f025c65f36ae..3edbda3a4bf6be 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -88,7 +88,9 @@ define i64 @test_floor_si64(double %x) {
 ; RV32IFD-NEXT:    call floor
 ; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_floor_si64:
@@ -105,7 +107,9 @@ define i64 @test_floor_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_floor_si64:
@@ -197,7 +201,9 @@ define i64 @test_floor_ui64(double %x) {
 ; RV32IFD-NEXT:    call floor
 ; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_floor_ui64:
@@ -214,7 +220,9 @@ define i64 @test_floor_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_floor_ui64:
@@ -306,7 +314,9 @@ define i64 @test_ceil_si64(double %x) {
 ; RV32IFD-NEXT:    call ceil
 ; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_ceil_si64:
@@ -323,7 +333,9 @@ define i64 @test_ceil_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_ceil_si64:
@@ -415,7 +427,9 @@ define i64 @test_ceil_ui64(double %x) {
 ; RV32IFD-NEXT:    call ceil
 ; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_ceil_ui64:
@@ -432,7 +446,9 @@ define i64 @test_ceil_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
@@ -524,7 +540,9 @@ define i64 @test_trunc_si64(double %x) {
 ; RV32IFD-NEXT:    call trunc
 ; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_trunc_si64:
@@ -541,7 +559,9 @@ define i64 @test_trunc_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_trunc_si64:
@@ -633,7 +653,9 @@ define i64 @test_trunc_ui64(double %x) {
 ; RV32IFD-NEXT:    call trunc
 ; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_trunc_ui64:
@@ -650,7 +672,9 @@ define i64 @test_trunc_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
@@ -742,7 +766,9 @@ define i64 @test_round_si64(double %x) {
 ; RV32IFD-NEXT:    call round
 ; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_round_si64:
@@ -759,7 +785,9 @@ define i64 @test_round_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_round_si64:
@@ -851,7 +879,9 @@ define i64 @test_round_ui64(double %x) {
 ; RV32IFD-NEXT:    call round
 ; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_round_ui64:
@@ -868,7 +898,9 @@ define i64 @test_round_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_round_ui64:
@@ -960,7 +992,9 @@ define i64 @test_roundeven_si64(double %x) {
 ; RV32IFD-NEXT:    call roundeven
 ; RV32IFD-NEXT:    call __fixdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_roundeven_si64:
@@ -977,7 +1011,9 @@ define i64 @test_roundeven_si64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    call __fixdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
@@ -1069,7 +1105,9 @@ define i64 @test_roundeven_ui64(double %x) {
 ; RV32IFD-NEXT:    call roundeven
 ; RV32IFD-NEXT:    call __fixunsdfdi
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: test_roundeven_ui64:
@@ -1086,7 +1124,9 @@ define i64 @test_roundeven_ui64(double %x) {
 ; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
@@ -1125,7 +1165,9 @@ define double @test_floor_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINXZDINX-NEXT:    call floor
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_floor_double:
@@ -1172,7 +1214,9 @@ define double @test_ceil_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINXZDINX-NEXT:    call ceil
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_ceil_double:
@@ -1219,7 +1263,9 @@ define double @test_trunc_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINXZDINX-NEXT:    call trunc
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_trunc_double:
@@ -1266,7 +1312,9 @@ define double @test_round_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINXZDINX-NEXT:    call round
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_round_double:
@@ -1313,7 +1361,9 @@ define double @test_roundeven_double(double %x) {
 ; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINXZDINX-NEXT:    call roundeven
 ; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
 ; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
 ; RV64IZFINXZDINX-LABEL: test_roundeven_double:
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 0c2b809c0be20c..0a3b5d47e5650a 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -86,7 +86,9 @@ define void @_Z3foov() {
 ; CHECK-NEXT:    slli a1, a0, 3
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_49, i64 2)
diff --git a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
index c4d932acbcc8b5..62dd3fe1e2f30c 100644
--- a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
+++ b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
@@ -12,7 +12,9 @@ define void @dwarf() {
 ; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    call foo
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: dwarf:
@@ -24,7 +26,9 @@ define void @dwarf() {
 ; RV64-NEXT:    addi a0, sp, 16
 ; RV64-NEXT:    call foo
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
index 067690333fdba7..851af342b391d8 100644
--- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
+++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
@@ -40,7 +40,11 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_4: # %lpad
 ; RV32I-NEXT:  .Ltmp4:
@@ -77,7 +81,11 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB0_4: # %lpad
 ; RV64I-NEXT:  .Ltmp4:
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 52442026dab502..ba422ffdcd3b89 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -2216,7 +2216,9 @@ define float @maximumnum_float(float %x, float %y) {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    call fmaximum_numf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: maximumnum_float:
@@ -2227,7 +2229,9 @@ define float @maximumnum_float(float %x, float %y) {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    call fmaximum_numf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %z = call float @llvm.maximumnum.f32(float %x, float %y)
   ret float %z
@@ -2264,7 +2268,9 @@ define float @minimumnum_float(float %x, float %y) {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    call fminimum_numf
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: minimumnum_float:
@@ -2275,7 +2281,9 @@ define float @minimumnum_float(float %x, float %y) {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    call fminimum_numf
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %z = call float @llvm.minimumnum.f32(float %x, float %y)
   ret float %z
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll
index 1b134484397528..837ff766b430f0 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll
@@ -102,7 +102,9 @@ define i64 @test_floor_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_floor_si64:
@@ -127,7 +129,9 @@ define i64 @test_floor_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_floor_si64:
@@ -233,7 +237,9 @@ define i64 @test_floor_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_floor_ui64:
@@ -258,7 +264,9 @@ define i64 @test_floor_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_floor_ui64:
@@ -364,7 +372,9 @@ define i64 @test_ceil_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_ceil_si64:
@@ -389,7 +399,9 @@ define i64 @test_ceil_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_ceil_si64:
@@ -495,7 +507,9 @@ define i64 @test_ceil_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_ceil_ui64:
@@ -520,7 +534,9 @@ define i64 @test_ceil_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_ceil_ui64:
@@ -626,7 +642,9 @@ define i64 @test_trunc_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_trunc_si64:
@@ -651,7 +669,9 @@ define i64 @test_trunc_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_trunc_si64:
@@ -757,7 +777,9 @@ define i64 @test_trunc_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_trunc_ui64:
@@ -782,7 +804,9 @@ define i64 @test_trunc_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_trunc_ui64:
@@ -888,7 +912,9 @@ define i64 @test_round_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_round_si64:
@@ -913,7 +939,9 @@ define i64 @test_round_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_round_si64:
@@ -1019,7 +1047,9 @@ define i64 @test_round_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_round_ui64:
@@ -1044,7 +1074,9 @@ define i64 @test_round_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_round_ui64:
@@ -1150,7 +1182,9 @@ define i64 @test_roundeven_si64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixsfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_roundeven_si64:
@@ -1175,7 +1209,9 @@ define i64 @test_roundeven_si64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixsfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_roundeven_si64:
@@ -1281,7 +1317,9 @@ define i64 @test_roundeven_ui64(float %x) {
 ; RV32IF-NEXT:    .cfi_offset ra, -4
 ; RV32IF-NEXT:    call __fixunssfdi
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_roundeven_ui64:
@@ -1306,7 +1344,9 @@ define i64 @test_roundeven_ui64(float %x) {
 ; RV32IZFINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZFINX-NEXT:    call __fixunssfdi
 ; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT:    .cfi_restore ra
 ; RV32IZFINX-NEXT:    addi sp, sp, 16
+; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFINX-NEXT:    ret
 ;
 ; RV64IZFINX-LABEL: test_roundeven_ui64:
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index c44f4942e9e699..bbdfda5c1e10d5 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -47,7 +47,9 @@ define i32 @stest_f64i32(double %x) {
 ; RV32IF-NEXT:    lui a0, 524288
 ; RV32IF-NEXT:  .LBB0_9: # %entry
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: stest_f64i32:
@@ -68,7 +70,9 @@ define i32 @stest_f64i32(double %x) {
 ; RV64IF-NEXT:    lui a0, 524288
 ; RV64IF-NEXT:  .LBB0_4: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: stest_f64i32:
@@ -120,7 +124,9 @@ define i32 @utest_f64i32(double %x) {
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: utest_f64i32:
@@ -137,7 +143,9 @@ define i32 @utest_f64i32(double %x) {
 ; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:  .LBB1_2: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: utest_f64i32:
@@ -196,7 +204,9 @@ define i32 @ustest_f64i32(double %x) {
 ; RV32IF-NEXT:    neg a1, a1
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: ustest_f64i32:
@@ -216,7 +226,9 @@ define i32 @ustest_f64i32(double %x) {
 ; RV64IF-NEXT:    neg a1, a1
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: ustest_f64i32:
@@ -386,7 +398,9 @@ define i32 @stest_f16i32(half %x) {
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:  .LBB6_9: # %entry
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f16i32:
@@ -408,7 +422,9 @@ define i32 @stest_f16i32(half %x) {
 ; RV64-NEXT:    lui a0, 524288
 ; RV64-NEXT:  .LBB6_4: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i64
@@ -435,7 +451,9 @@ define i32 @utesth_f16i32(half %x) {
 ; RV32-NEXT:    addi a1, a1, -1
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utesth_f16i32:
@@ -453,7 +471,9 @@ define i32 @utesth_f16i32(half %x) {
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB7_2: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui half %x to i64
@@ -493,7 +513,9 @@ define i32 @ustest_f16i32(half %x) {
 ; RV32-NEXT:    neg a1, a1
 ; RV32-NEXT:    and a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f16i32:
@@ -514,7 +536,9 @@ define i32 @ustest_f16i32(half %x) {
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i64
@@ -548,7 +572,9 @@ define i16 @stest_f64i16(double %x) {
 ; RV32IF-NEXT:    lui a0, 1048568
 ; RV32IF-NEXT:  .LBB9_4: # %entry
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: stest_f64i16:
@@ -570,7 +596,9 @@ define i16 @stest_f64i16(double %x) {
 ; RV64IF-NEXT:    lui a0, 1048568
 ; RV64IF-NEXT:  .LBB9_4: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: stest_f64i16:
@@ -635,7 +663,9 @@ define i16 @utest_f64i16(double %x) {
 ; RV32IF-NEXT:    mv a0, a1
 ; RV32IF-NEXT:  .LBB10_2: # %entry
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: utest_f64i16:
@@ -652,7 +682,9 @@ define i16 @utest_f64i16(double %x) {
 ; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:  .LBB10_2: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: utest_f64i16:
@@ -702,7 +734,9 @@ define i16 @ustest_f64i16(double %x) {
 ; RV32IF-NEXT:    neg a1, a1
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: ustest_f64i16:
@@ -722,7 +756,9 @@ define i16 @ustest_f64i16(double %x) {
 ; RV64IF-NEXT:    neg a1, a1
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: ustest_f64i16:
@@ -899,7 +935,9 @@ define i16 @stest_f16i16(half %x) {
 ; RV32-NEXT:    lui a0, 1048568
 ; RV32-NEXT:  .LBB15_4: # %entry
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f16i16:
@@ -922,7 +960,9 @@ define i16 @stest_f16i16(half %x) {
 ; RV64-NEXT:    lui a0, 1048568
 ; RV64-NEXT:  .LBB15_4: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i32
@@ -950,7 +990,9 @@ define i16 @utesth_f16i16(half %x) {
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB16_2: # %entry
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utesth_f16i16:
@@ -968,7 +1010,9 @@ define i16 @utesth_f16i16(half %x) {
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB16_2: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui half %x to i32
@@ -997,7 +1041,9 @@ define i16 @ustest_f16i16(half %x) {
 ; RV32-NEXT:    neg a1, a1
 ; RV32-NEXT:    and a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f16i16:
@@ -1018,7 +1064,9 @@ define i16 @ustest_f16i16(half %x) {
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i32
@@ -1092,7 +1140,9 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IF-NEXT:    neg a0, a0
 ; RV32IF-NEXT:    and a0, a0, a3
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: stest_f64i64:
@@ -1131,7 +1181,9 @@ define i64 @stest_f64i64(double %x) {
 ; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:  .LBB18_9: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: stest_f64i64:
@@ -1191,7 +1243,9 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IFD-NEXT:    neg a0, a0
 ; RV32IFD-NEXT:    and a0, a0, a3
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: stest_f64i64:
@@ -1238,7 +1292,9 @@ define i64 @utest_f64i64(double %x) {
 ; RV32IF-NEXT:    and a0, a1, a3
 ; RV32IF-NEXT:    and a1, a1, a2
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64-LABEL: utest_f64i64:
@@ -1252,7 +1308,9 @@ define i64 @utest_f64i64(double %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32IFD-LABEL: utest_f64i64:
@@ -1278,7 +1336,9 @@ define i64 @utest_f64i64(double %x) {
 ; RV32IFD-NEXT:    and a0, a1, a3
 ; RV32IFD-NEXT:    and a1, a1, a2
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 entry:
   %conv = fptoui double %x to i128
@@ -1340,7 +1400,9 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IF-NEXT:    and a0, a1, a2
 ; RV32IF-NEXT:    and a1, a1, a4
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f64i64:
@@ -1367,7 +1429,9 @@ define i64 @ustest_f64i64(double %x) {
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32IFD-LABEL: ustest_f64i64:
@@ -1419,7 +1483,9 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IFD-NEXT:    and a0, a1, a2
 ; RV32IFD-NEXT:    and a1, a1, a4
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 entry:
   %conv = fptosi double %x to i128
@@ -1489,7 +1555,9 @@ define i64 @stest_f32i64(float %x) {
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    and a0, a0, a3
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f32i64:
@@ -1534,7 +1602,9 @@ define i64 @utest_f32i64(float %x) {
 ; RV32-NEXT:    and a0, a1, a3
 ; RV32-NEXT:    and a1, a1, a2
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utest_f32i64:
@@ -1548,7 +1618,9 @@ define i64 @utest_f32i64(float %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui float %x to i128
@@ -1608,7 +1680,9 @@ define i64 @ustest_f32i64(float %x) {
 ; RV32-NEXT:    and a0, a1, a2
 ; RV32-NEXT:    and a1, a1, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f32i64:
@@ -1635,7 +1709,9 @@ define i64 @ustest_f32i64(float %x) {
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi float %x to i128
@@ -1706,7 +1782,9 @@ define i64 @stest_f16i64(half %x) {
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    and a0, a0, a3
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f16i64:
@@ -1746,7 +1824,9 @@ define i64 @stest_f16i64(half %x) {
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB24_9: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i128
@@ -1783,7 +1863,9 @@ define i64 @utesth_f16i64(half %x) {
 ; RV32-NEXT:    and a0, a1, a3
 ; RV32-NEXT:    and a1, a1, a2
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utesth_f16i64:
@@ -1798,7 +1880,9 @@ define i64 @utesth_f16i64(half %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui half %x to i128
@@ -1859,7 +1943,9 @@ define i64 @ustest_f16i64(half %x) {
 ; RV32-NEXT:    and a0, a1, a2
 ; RV32-NEXT:    and a1, a1, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f16i64:
@@ -1887,7 +1973,9 @@ define i64 @ustest_f16i64(half %x) {
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i128
@@ -1941,7 +2029,9 @@ define i32 @stest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    lui a0, 524288
 ; RV32IF-NEXT:  .LBB27_9: # %entry
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: stest_f64i32_mm:
@@ -1962,7 +2052,9 @@ define i32 @stest_f64i32_mm(double %x) {
 ; RV64IF-NEXT:    lui a0, 524288
 ; RV64IF-NEXT:  .LBB27_4: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: stest_f64i32_mm:
@@ -2010,7 +2102,9 @@ define i32 @utest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: utest_f64i32_mm:
@@ -2027,7 +2121,9 @@ define i32 @utest_f64i32_mm(double %x) {
 ; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:  .LBB28_2: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: utest_f64i32_mm:
@@ -2079,7 +2175,9 @@ define i32 @ustest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: ustest_f64i32_mm:
@@ -2099,7 +2197,9 @@ define i32 @ustest_f64i32_mm(double %x) {
 ; RV64IF-NEXT:    neg a1, a1
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: ustest_f64i32_mm:
@@ -2262,7 +2362,9 @@ define i32 @stest_f16i32_mm(half %x) {
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:  .LBB33_9: # %entry
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f16i32_mm:
@@ -2284,7 +2386,9 @@ define i32 @stest_f16i32_mm(half %x) {
 ; RV64-NEXT:    lui a0, 524288
 ; RV64-NEXT:  .LBB33_4: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i64
@@ -2307,7 +2411,9 @@ define i32 @utesth_f16i32_mm(half %x) {
 ; RV32-NEXT:    addi a1, a1, -1
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utesth_f16i32_mm:
@@ -2325,7 +2431,9 @@ define i32 @utesth_f16i32_mm(half %x) {
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB34_2: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui half %x to i64
@@ -2358,7 +2466,9 @@ define i32 @ustest_f16i32_mm(half %x) {
 ; RV32-NEXT:    addi a1, a1, -1
 ; RV32-NEXT:    and a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f16i32_mm:
@@ -2379,7 +2489,9 @@ define i32 @ustest_f16i32_mm(half %x) {
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i64
@@ -2411,7 +2523,9 @@ define i16 @stest_f64i16_mm(double %x) {
 ; RV32IF-NEXT:    lui a0, 1048568
 ; RV32IF-NEXT:  .LBB36_4: # %entry
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: stest_f64i16_mm:
@@ -2433,7 +2547,9 @@ define i16 @stest_f64i16_mm(double %x) {
 ; RV64IF-NEXT:    lui a0, 1048568
 ; RV64IF-NEXT:  .LBB36_4: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: stest_f64i16_mm:
@@ -2496,7 +2612,9 @@ define i16 @utest_f64i16_mm(double %x) {
 ; RV32IF-NEXT:    mv a0, a1
 ; RV32IF-NEXT:  .LBB37_2: # %entry
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: utest_f64i16_mm:
@@ -2513,7 +2631,9 @@ define i16 @utest_f64i16_mm(double %x) {
 ; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:  .LBB37_2: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: utest_f64i16_mm:
@@ -2562,7 +2682,9 @@ define i16 @ustest_f64i16_mm(double %x) {
 ; RV32IF-NEXT:    neg a1, a1
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: ustest_f64i16_mm:
@@ -2582,7 +2704,9 @@ define i16 @ustest_f64i16_mm(double %x) {
 ; RV64IF-NEXT:    neg a1, a1
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: ustest_f64i16_mm:
@@ -2752,7 +2876,9 @@ define i16 @stest_f16i16_mm(half %x) {
 ; RV32-NEXT:    lui a0, 1048568
 ; RV32-NEXT:  .LBB42_4: # %entry
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f16i16_mm:
@@ -2775,7 +2901,9 @@ define i16 @stest_f16i16_mm(half %x) {
 ; RV64-NEXT:    lui a0, 1048568
 ; RV64-NEXT:  .LBB42_4: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i32
@@ -2801,7 +2929,9 @@ define i16 @utesth_f16i16_mm(half %x) {
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB43_2: # %entry
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utesth_f16i16_mm:
@@ -2819,7 +2949,9 @@ define i16 @utesth_f16i16_mm(half %x) {
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB43_2: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui half %x to i32
@@ -2847,7 +2979,9 @@ define i16 @ustest_f16i16_mm(half %x) {
 ; RV32-NEXT:    neg a1, a1
 ; RV32-NEXT:    and a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f16i16_mm:
@@ -2868,7 +3002,9 @@ define i16 @ustest_f16i16_mm(half %x) {
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i32
@@ -2940,7 +3076,9 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    neg a0, a0
 ; RV32IF-NEXT:    and a0, a0, a3
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: stest_f64i64_mm:
@@ -2979,7 +3117,9 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:  .LBB45_9: # %entry
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
 ; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: stest_f64i64_mm:
@@ -3039,7 +3179,9 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    neg a0, a0
 ; RV32IFD-NEXT:    and a0, a0, a3
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: stest_f64i64_mm:
@@ -3084,7 +3226,9 @@ define i64 @utest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    and a0, a1, a3
 ; RV32IF-NEXT:    and a1, a1, a2
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64-LABEL: utest_f64i64_mm:
@@ -3098,7 +3242,9 @@ define i64 @utest_f64i64_mm(double %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32IFD-LABEL: utest_f64i64_mm:
@@ -3124,7 +3270,9 @@ define i64 @utest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    and a0, a1, a3
 ; RV32IFD-NEXT:    and a1, a1, a2
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 entry:
   %conv = fptoui double %x to i128
@@ -3169,7 +3317,9 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    and a0, a3, a1
 ; RV32IF-NEXT:    and a1, a3, a2
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f64i64_mm:
@@ -3191,7 +3341,9 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32IFD-LABEL: ustest_f64i64_mm:
@@ -3227,7 +3379,9 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    and a0, a3, a1
 ; RV32IFD-NEXT:    and a1, a3, a2
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 entry:
   %conv = fptosi double %x to i128
@@ -3295,7 +3449,9 @@ define i64 @stest_f32i64_mm(float %x) {
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    and a0, a0, a3
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f32i64_mm:
@@ -3338,7 +3494,9 @@ define i64 @utest_f32i64_mm(float %x) {
 ; RV32-NEXT:    and a0, a1, a3
 ; RV32-NEXT:    and a1, a1, a2
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utest_f32i64_mm:
@@ -3352,7 +3510,9 @@ define i64 @utest_f32i64_mm(float %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui float %x to i128
@@ -3395,7 +3555,9 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; RV32-NEXT:    and a0, a3, a1
 ; RV32-NEXT:    and a1, a3, a2
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f32i64_mm:
@@ -3417,7 +3579,9 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi float %x to i128
@@ -3486,7 +3650,9 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    and a0, a0, a3
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stest_f16i64_mm:
@@ -3526,7 +3692,9 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB51_9: # %entry
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i128
@@ -3561,7 +3729,9 @@ define i64 @utesth_f16i64_mm(half %x) {
 ; RV32-NEXT:    and a0, a1, a3
 ; RV32-NEXT:    and a1, a1, a2
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: utesth_f16i64_mm:
@@ -3576,7 +3746,9 @@ define i64 @utesth_f16i64_mm(half %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptoui half %x to i128
@@ -3620,7 +3792,9 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; RV32-NEXT:    and a0, a3, a1
 ; RV32-NEXT:    and a1, a3, a2
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ustest_f16i64_mm:
@@ -3643,7 +3817,9 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; RV64-NEXT:    addi a1, a1, -1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %conv = fptosi half %x to i128
diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll
index bc4f89e9177489..279904dda5afe2 100644
--- a/llvm/test/CodeGen/RISCV/frame-info.ll
+++ b/llvm/test/CodeGen/RISCV/frame-info.ll
@@ -29,7 +29,10 @@ define void @trivial() {
 ; RV32-WITHFP-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    .cfi_restore ra
+; RV32-WITHFP-NEXT:    .cfi_restore s0
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT:    ret
 ;
 ; RV64-WITHFP-LABEL: trivial:
@@ -44,7 +47,10 @@ define void @trivial() {
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    .cfi_restore ra
+; RV64-WITHFP-NEXT:    .cfi_restore s0
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
   ret void
 }
@@ -66,9 +72,13 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32-NEXT:    mv sp, a0
 ; RV32-NEXT:    call callee_with_args
 ; RV32-NEXT:    addi sp, s0, -16
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stack_alloc:
@@ -89,9 +99,13 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-NEXT:    mv sp, a0
 ; RV64-NEXT:    call callee_with_args
 ; RV64-NEXT:    addi sp, s0, -16
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-WITHFP-LABEL: stack_alloc:
@@ -110,9 +124,13 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32-WITHFP-NEXT:    mv sp, a0
 ; RV32-WITHFP-NEXT:    call callee_with_args
 ; RV32-WITHFP-NEXT:    addi sp, s0, -16
+; RV32-WITHFP-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    .cfi_restore ra
+; RV32-WITHFP-NEXT:    .cfi_restore s0
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT:    ret
 ;
 ; RV64-WITHFP-LABEL: stack_alloc:
@@ -133,9 +151,13 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-WITHFP-NEXT:    mv sp, a0
 ; RV64-WITHFP-NEXT:    call callee_with_args
 ; RV64-WITHFP-NEXT:    addi sp, s0, -16
+; RV64-WITHFP-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    .cfi_restore ra
+; RV64-WITHFP-NEXT:    .cfi_restore s0
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
@@ -157,7 +179,9 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    call callee2
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: branch_and_tail_call:
@@ -173,7 +197,9 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    call callee2
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-WITHFP-LABEL: branch_and_tail_call:
@@ -194,7 +220,10 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32-WITHFP-NEXT:    call callee2
 ; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    .cfi_restore ra
+; RV32-WITHFP-NEXT:    .cfi_restore s0
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT:    ret
 ;
 ; RV64-WITHFP-LABEL: branch_and_tail_call:
@@ -215,7 +244,10 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-WITHFP-NEXT:    call callee2
 ; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    .cfi_restore ra
+; RV64-WITHFP-NEXT:    .cfi_restore s0
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
+; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
   br i1 %a, label %blue_pill, label %red_pill
 blue_pill:
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index 17164e8da562a5..2f6c30ff210e37 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -524,7 +524,9 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) strictfp {
 ; CHECK32-D-NEXT:    seqz a1, a0
 ; CHECK32-D-NEXT:    add a0, a0, a1
 ; CHECK32-D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT:    .cfi_restore ra
 ; CHECK32-D-NEXT:    addi sp, sp, 16
+; CHECK32-D-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK32-D-NEXT:    ret
   %a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict")
   %b = icmp eq i32 %a, 0
@@ -2359,7 +2361,11 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) strictfp {
 ; CHECK32-D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-D-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; CHECK32-D-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT:    .cfi_restore ra
+; CHECK32-D-NEXT:    .cfi_restore s0
+; CHECK32-D-NEXT:    .cfi_restore s1
 ; CHECK32-D-NEXT:    addi sp, sp, 16
+; CHECK32-D-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK32-D-NEXT:    ret
   %3 = add i32 %0, 1
   %4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -2493,7 +2499,11 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) strictfp {
 ; CHECK32-D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK32-D-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; CHECK32-D-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT:    .cfi_restore ra
+; CHECK32-D-NEXT:    .cfi_restore s0
+; CHECK32-D-NEXT:    .cfi_restore s1
 ; CHECK32-D-NEXT:    addi sp, sp, 16
+; CHECK32-D-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK32-D-NEXT:    ret
   %3 = add i32 %0, 1
   %4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 81e29329e71817..617ecf9f2a9611 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -3020,7 +3020,12 @@ define half @maximumnum_half(half %x, half %y) {
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: maximumnum_half:
@@ -3051,7 +3056,12 @@ define half @maximumnum_half(half %x, half %y) {
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: maximumnum_half:
@@ -3114,7 +3124,12 @@ define half @minimumnum_half(half %x, half %y) {
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: minimumnum_half:
@@ -3145,7 +3160,12 @@ define half @minimumnum_half(half %x, half %y) {
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: minimumnum_half:
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll
index 8eeea074265753..8a787ee578990e 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll
@@ -325,7 +325,9 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_floor_si64:
@@ -351,7 +353,9 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_floor_si64:
@@ -389,7 +393,9 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_floor_si64:
@@ -429,7 +435,9 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_floor_si64:
@@ -762,7 +770,9 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_floor_ui64:
@@ -788,7 +798,9 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_floor_ui64:
@@ -826,7 +838,9 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_floor_ui64:
@@ -866,7 +880,9 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_floor_ui64:
@@ -1199,7 +1215,9 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_ceil_si64:
@@ -1225,7 +1243,9 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_ceil_si64:
@@ -1263,7 +1283,9 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_si64:
@@ -1303,7 +1325,9 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_ceil_si64:
@@ -1636,7 +1660,9 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_ceil_ui64:
@@ -1662,7 +1688,9 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_ceil_ui64:
@@ -1700,7 +1728,9 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_ui64:
@@ -1740,7 +1770,9 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_ceil_ui64:
@@ -2073,7 +2105,9 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_trunc_si64:
@@ -2099,7 +2133,9 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_trunc_si64:
@@ -2137,7 +2173,9 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si64:
@@ -2177,7 +2215,9 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_trunc_si64:
@@ -2510,7 +2550,9 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_trunc_ui64:
@@ -2536,7 +2578,9 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_trunc_ui64:
@@ -2574,7 +2618,9 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui64:
@@ -2614,7 +2660,9 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_trunc_ui64:
@@ -2947,7 +2995,9 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_round_si64:
@@ -2973,7 +3023,9 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_round_si64:
@@ -3011,7 +3063,9 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_round_si64:
@@ -3051,7 +3105,9 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_round_si64:
@@ -3384,7 +3440,9 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_round_ui64:
@@ -3410,7 +3468,9 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_round_ui64:
@@ -3448,7 +3508,9 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui64:
@@ -3488,7 +3550,9 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_round_ui64:
@@ -3821,7 +3885,9 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixhfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_roundeven_si64:
@@ -3847,7 +3913,9 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixhfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_roundeven_si64:
@@ -3885,7 +3953,9 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si64:
@@ -3925,7 +3995,9 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixhfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_roundeven_si64:
@@ -4258,7 +4330,9 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    call __fixunshfdi
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    .cfi_restore ra
 ; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: test_roundeven_ui64:
@@ -4284,7 +4358,9 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZHINX-NEXT:    .cfi_offset ra, -4
 ; RV32IZHINX-NEXT:    call __fixunshfdi
 ; RV32IZHINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT:    .cfi_restore ra
 ; RV32IZHINX-NEXT:    addi sp, sp, 16
+; RV32IZHINX-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINX-NEXT:    ret
 ;
 ; RV64IZHINX-LABEL: test_roundeven_ui64:
@@ -4322,7 +4398,9 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT:    .cfi_restore ra
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
+; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui64:
@@ -4362,7 +4440,9 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT:    call __fixunshfdi
 ; RV32IZHINXMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT:    .cfi_restore ra
 ; RV32IZHINXMIN-NEXT:    addi sp, sp, 16
+; RV32IZHINXMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IZHINXMIN-NEXT:    ret
 ;
 ; RV64IZHINXMIN-LABEL: test_roundeven_ui64:
diff --git a/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll b/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll
index 12c95206d21bed..55b23512182062 100644
--- a/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll
+++ b/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll
@@ -12,7 +12,9 @@ define ptr @f2(ptr %x0, ptr %x1) {
 ; CHECK-NEXT:    mv t0, a1
 ; CHECK-NEXT:    call __hwasan_check_x10_2_short
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   call void @llvm.hwasan.check.memaccess.shortgranules(ptr %x1, ptr %x0, i32 2)
   ret ptr %x0
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index 6009a6c7e138ae..772d4a3e69002f 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -100,8 +100,11 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    slli a2, a2, 1
 ; RV32-NEXT:    add sp, sp, a2
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ctz_nxv8i1_no_range:
diff --git a/llvm/test/CodeGen/RISCV/kcfi-mir.ll b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
index e478930d59abc5..52c2a9be8fefb4 100644
--- a/llvm/test/CodeGen/RISCV/kcfi-mir.ll
+++ b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
@@ -15,7 +15,9 @@ define void @f1(ptr noundef %x) !kcfi_type !1 {
   ; CHECK-NEXT:     PseudoCALLIndirect killed $x10, csr_ilp32_lp64, implicit-def dead $x1, implicit-def $x2
   ; CHECK-NEXT:   }
   ; CHECK-NEXT:   $x1 = LD $x2, 8 :: (load (s64) from %stack.0)
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x1
   ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 16
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 0
   ; CHECK-NEXT:   PseudoRET
   call void %x() [ "kcfi"(i32 12345678) ]
   ret void
diff --git a/llvm/test/CodeGen/RISCV/large-stack.ll b/llvm/test/CodeGen/RISCV/large-stack.ll
index 21a46e9819bc9a..1db33821c1e188 100644
--- a/llvm/test/CodeGen/RISCV/large-stack.ll
+++ b/llvm/test/CodeGen/RISCV/large-stack.ll
@@ -16,6 +16,7 @@ define void @test() {
 ; RV32I-FPELIM-NEXT:    lui a0, 74565
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 1664
 ; RV32I-FPELIM-NEXT:    add sp, sp, a0
+; RV32I-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: test:
@@ -34,9 +35,13 @@ define void @test() {
 ; RV32I-WITHFP-NEXT:    lui a0, 74565
 ; RV32I-WITHFP-NEXT:    addi a0, a0, -352
 ; RV32I-WITHFP-NEXT:    add sp, sp, a0
+; RV32I-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-WITHFP-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    .cfi_restore ra
+; RV32I-WITHFP-NEXT:    .cfi_restore s0
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 2032
+; RV32I-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-WITHFP-NEXT:    ret
   %tmp = alloca [ 305419896 x i8 ] , align 4
   ret void
@@ -71,9 +76,13 @@ define void @test_emergency_spill_slot(i32 %a) {
 ; RV32I-FPELIM-NEXT:    lui a0, 97
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 672
 ; RV32I-FPELIM-NEXT:    add sp, sp, a0
+; RV32I-FPELIM-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-FPELIM-NEXT:    lw s0, 2028(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    lw s1, 2024(sp) # 4-byte Folded Reload
+; RV32I-FPELIM-NEXT:    .cfi_restore s0
+; RV32I-FPELIM-NEXT:    .cfi_restore s1
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 2032
+; RV32I-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: test_emergency_spill_slot:
@@ -108,11 +117,17 @@ define void @test_emergency_spill_slot(i32 %a) {
 ; RV32I-WITHFP-NEXT:    lui a0, 97
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 688
 ; RV32I-WITHFP-NEXT:    add sp, sp, a0
+; RV32I-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-WITHFP-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    .cfi_restore ra
+; RV32I-WITHFP-NEXT:    .cfi_restore s0
+; RV32I-WITHFP-NEXT:    .cfi_restore s1
+; RV32I-WITHFP-NEXT:    .cfi_restore s2
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 2032
+; RV32I-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-WITHFP-NEXT:    ret
   %data = alloca [ 100000 x i32 ] , align 4
   %ptr = getelementptr inbounds [100000 x i32], ptr %data, i32 0, i32 80000
diff --git a/llvm/test/CodeGen/RISCV/live-sp.mir b/llvm/test/CodeGen/RISCV/live-sp.mir
index fa6297a3913a9b..9f40870feb00f8 100644
--- a/llvm/test/CodeGen/RISCV/live-sp.mir
+++ b/llvm/test/CodeGen/RISCV/live-sp.mir
@@ -81,7 +81,9 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY $x0
     ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) @vararg, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit $x11, implicit-def $x2
     ; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.1)
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NEXT: PseudoRET
     SW renamable $x1, %stack.0.a, 0 :: (store (s32) into %ir.a)
     renamable $x11 = ADDIW killed renamable $x1, 0
diff --git a/llvm/test/CodeGen/RISCV/llvm.exp10.ll b/llvm/test/CodeGen/RISCV/llvm.exp10.ll
index 0941f6a73da280..00727b85620345 100644
--- a/llvm/test/CodeGen/RISCV/llvm.exp10.ll
+++ b/llvm/test/CodeGen/RISCV/llvm.exp10.ll
@@ -37,7 +37,9 @@ define half @exp10_f16(half %x) {
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_f16:
@@ -54,7 +56,9 @@ define half @exp10_f16(half %x) {
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
 ; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call half @llvm.exp10.f16(half %x)
   ret half %r
@@ -73,7 +77,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
 ; RV32IFD-NEXT:    call __truncsfhf2
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v1f16:
@@ -88,7 +94,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
 ; RV64IFD-NEXT:    call __truncsfhf2
 ; RV64IFD-NEXT:    fmv.x.w a0, fa0
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
 ; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <1 x half> @llvm.exp10.v1f16(<1 x half> %x)
   ret <1 x half> %r
@@ -120,7 +128,11 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore s0
+; RV32IFD-NEXT:    .cfi_restore fs0
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v2f16:
@@ -148,7 +160,11 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore s0
+; RV64IFD-NEXT:    .cfi_restore s1
 ; RV64IFD-NEXT:    addi sp, sp, 32
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
   ret <2 x half> %r
@@ -205,7 +221,14 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
 ; RV32IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore s0
+; RV32IFD-NEXT:    .cfi_restore s1
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
+; RV32IFD-NEXT:    .cfi_restore fs2
 ; RV32IFD-NEXT:    addi sp, sp, 48
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v3f16:
@@ -253,7 +276,13 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
 ; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore s0
+; RV64IFD-NEXT:    .cfi_restore s1
+; RV64IFD-NEXT:    .cfi_restore s2
+; RV64IFD-NEXT:    .cfi_restore fs0
 ; RV64IFD-NEXT:    addi sp, sp, 48
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <3 x half> @llvm.exp10.v3f16(<3 x half> %x)
   ret <3 x half> %r
@@ -326,7 +355,17 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
 ; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore s0
+; RV32IFD-NEXT:    .cfi_restore s1
+; RV32IFD-NEXT:    .cfi_restore s2
+; RV32IFD-NEXT:    .cfi_restore s3
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
+; RV32IFD-NEXT:    .cfi_restore fs2
+; RV32IFD-NEXT:    .cfi_restore fs3
 ; RV32IFD-NEXT:    addi sp, sp, 64
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v4f16:
@@ -389,7 +428,16 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore s0
+; RV64IFD-NEXT:    .cfi_restore s1
+; RV64IFD-NEXT:    .cfi_restore s2
+; RV64IFD-NEXT:    .cfi_restore s3
+; RV64IFD-NEXT:    .cfi_restore fs0
+; RV64IFD-NEXT:    .cfi_restore fs1
+; RV64IFD-NEXT:    .cfi_restore fs2
 ; RV64IFD-NEXT:    addi sp, sp, 64
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <4 x half> @llvm.exp10.v4f16(<4 x half> %x)
   ret <4 x half> %r
@@ -412,7 +460,9 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    call exp10f
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v1f32:
@@ -423,7 +473,9 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
 ; RV64IFD-NEXT:    .cfi_offset ra, -8
 ; RV64IFD-NEXT:    call exp10f
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
 ; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <1 x float> @llvm.exp10.v1f32(<1 x float> %x)
   ret <1 x float> %r
@@ -450,7 +502,11 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v2f32:
@@ -473,7 +529,11 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore fs0
+; RV64IFD-NEXT:    .cfi_restore fs1
 ; RV64IFD-NEXT:    addi sp, sp, 32
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
   ret <2 x float> %r
@@ -512,7 +572,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
 ; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore s0
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
+; RV32IFD-NEXT:    .cfi_restore fs2
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v3f32:
@@ -551,7 +617,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
 ; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore s0
+; RV64IFD-NEXT:    .cfi_restore s1
+; RV64IFD-NEXT:    .cfi_restore fs0
+; RV64IFD-NEXT:    .cfi_restore fs1
 ; RV64IFD-NEXT:    addi sp, sp, 48
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <3 x float> @llvm.exp10.v3f32(<3 x float> %x)
   ret <3 x float> %r
@@ -598,7 +670,14 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
 ; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore s0
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
+; RV32IFD-NEXT:    .cfi_restore fs2
+; RV32IFD-NEXT:    .cfi_restore fs3
 ; RV32IFD-NEXT:    addi sp, sp, 48
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v4f32:
@@ -641,7 +720,14 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
 ; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore s0
+; RV64IFD-NEXT:    .cfi_restore fs0
+; RV64IFD-NEXT:    .cfi_restore fs1
+; RV64IFD-NEXT:    .cfi_restore fs2
+; RV64IFD-NEXT:    .cfi_restore fs3
 ; RV64IFD-NEXT:    addi sp, sp, 48
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
   ret <4 x float> %r
@@ -682,7 +768,11 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v2f64:
@@ -705,7 +795,11 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore fs0
+; RV64IFD-NEXT:    .cfi_restore fs1
 ; RV64IFD-NEXT:    addi sp, sp, 32
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
   ret <2 x double> %r
@@ -744,7 +838,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
 ; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore s0
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
+; RV32IFD-NEXT:    .cfi_restore fs2
 ; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v3f64:
@@ -779,7 +879,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
 ; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore s0
+; RV64IFD-NEXT:    .cfi_restore fs0
+; RV64IFD-NEXT:    .cfi_restore fs1
+; RV64IFD-NEXT:    .cfi_restore fs2
 ; RV64IFD-NEXT:    addi sp, sp, 48
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <3 x double> @llvm.exp10.v3f64(<3 x double> %x)
   ret <3 x double> %r
@@ -826,7 +932,14 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
 ; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    .cfi_restore ra
+; RV32IFD-NEXT:    .cfi_restore s0
+; RV32IFD-NEXT:    .cfi_restore fs0
+; RV32IFD-NEXT:    .cfi_restore fs1
+; RV32IFD-NEXT:    .cfi_restore fs2
+; RV32IFD-NEXT:    .cfi_restore fs3
 ; RV32IFD-NEXT:    addi sp, sp, 48
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp10_v4f64:
@@ -869,7 +982,14 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
 ; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    .cfi_restore ra
+; RV64IFD-NEXT:    .cfi_restore s0
+; RV64IFD-NEXT:    .cfi_restore fs0
+; RV64IFD-NEXT:    .cfi_restore fs1
+; RV64IFD-NEXT:    .cfi_restore fs2
+; RV64IFD-NEXT:    .cfi_restore fs3
 ; RV64IFD-NEXT:    addi sp, sp, 48
+; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT:    ret
   %r = call <4 x double> @llvm.exp10.v4f64(<4 x double> %x)
   ret <4 x double> %r
diff --git a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
index 1d5487e19e894f..a6eb5795c99307 100644
--- a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
+++ b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
@@ -20,6 +20,7 @@ define void @use_frame_base_reg() {
 ; RV32I-NEXT:    lui a0, 24
 ; RV32I-NEXT:    addi a0, a0, 1712
 ; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: use_frame_base_reg:
@@ -36,6 +37,7 @@ define void @use_frame_base_reg() {
 ; RV64I-NEXT:    lui a0, 24
 ; RV64I-NEXT:    addiw a0, a0, 1712
 ; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 
   %va = alloca i8, align 4
@@ -59,6 +61,7 @@ define void @load_with_offset() {
 ; RV32I-NEXT:    sb a1, 0(a0)
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    addi sp, sp, 480
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: load_with_offset:
@@ -71,6 +74,7 @@ define void @load_with_offset() {
 ; RV64I-NEXT:    sb a1, 0(a0)
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    addi sp, sp, 480
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 
   %va = alloca [2500 x i8], align 4
@@ -92,6 +96,7 @@ define void @load_with_offset2() {
 ; RV32I-NEXT:    sb a0, 1412(sp)
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    addi sp, sp, 480
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: load_with_offset2:
@@ -103,6 +108,7 @@ define void @load_with_offset2() {
 ; RV64I-NEXT:    sb a0, 1412(sp)
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    addi sp, sp, 480
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 
   %va = alloca [2500 x i8], align 4
@@ -127,9 +133,13 @@ define void @frame_pointer() "frame-pointer"="all" {
 ; RV32I-NEXT:    lbu a0, -1960(s0)
 ; RV32I-NEXT:    sb a0, -1960(s0)
 ; RV32I-NEXT:    addi sp, sp, 480
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: frame_pointer:
@@ -147,9 +157,13 @@ define void @frame_pointer() "frame-pointer"="all" {
 ; RV64I-NEXT:    lbu a1, 0(a0)
 ; RV64I-NEXT:    sb a1, 0(a0)
 ; RV64I-NEXT:    addi sp, sp, 496
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 
   %va = alloca [2500 x i8], align 4
diff --git a/llvm/test/CodeGen/RISCV/lpad.ll b/llvm/test/CodeGen/RISCV/lpad.ll
index 049715b7dbb691..87e76ba0f5e5ae 100644
--- a/llvm/test/CodeGen/RISCV/lpad.ll
+++ b/llvm/test/CodeGen/RISCV/lpad.ll
@@ -149,7 +149,9 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; RV32-NEXT:  .Ltmp1:
 ; RV32-NEXT:  .LBB2_1: # %try.cont
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB2_2: # %lpad
 ; RV32-NEXT:  .Ltmp2:
@@ -167,7 +169,9 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; RV64-NEXT:  .Ltmp1:
 ; RV64-NEXT:  .LBB2_1: # %try.cont
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB2_2: # %lpad
 ; RV64-NEXT:  .Ltmp2:
@@ -186,7 +190,9 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; FIXED-ONE-RV32-NEXT:  .Ltmp1:
 ; FIXED-ONE-RV32-NEXT:  .LBB2_1: # %try.cont
 ; FIXED-ONE-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; FIXED-ONE-RV32-NEXT:    .cfi_restore ra
 ; FIXED-ONE-RV32-NEXT:    addi sp, sp, 16
+; FIXED-ONE-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; FIXED-ONE-RV32-NEXT:    ret
 ; FIXED-ONE-RV32-NEXT:  .LBB2_2: # %lpad
 ; FIXED-ONE-RV32-NEXT:  .Ltmp2:
@@ -205,7 +211,9 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; FIXED-ONE-RV64-NEXT:  .Ltmp1:
 ; FIXED-ONE-RV64-NEXT:  .LBB2_1: # %try.cont
 ; FIXED-ONE-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; FIXED-ONE-RV64-NEXT:    .cfi_restore ra
 ; FIXED-ONE-RV64-NEXT:    addi sp, sp, 16
+; FIXED-ONE-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; FIXED-ONE-RV64-NEXT:    ret
 ; FIXED-ONE-RV64-NEXT:  .LBB2_2: # %lpad
 ; FIXED-ONE-RV64-NEXT:  .Ltmp2:
diff --git a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
index 45db5078f150c4..71fad9180d304f 100644
--- a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
+++ b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
@@ -49,10 +49,15 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:    call __cxa_end_catch
 ; CHECK-NEXT:    mv a0, s1
 ; CHECK-NEXT:    addi sp, s0, -32
+; CHECK-NEXT:    .cfi_def_cfa sp, 32
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
 ; CHECK-NEXT:    addi sp, sp, 32
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_4: # %ehcleanup
 ; CHECK-NEXT:    call _Unwind_Resume
diff --git a/llvm/test/CodeGen/RISCV/nontemporal.ll b/llvm/test/CodeGen/RISCV/nontemporal.ll
index 55bd32e4857345..c18aab87698bac 100644
--- a/llvm/test/CodeGen/RISCV/nontemporal.ll
+++ b/llvm/test/CodeGen/RISCV/nontemporal.ll
@@ -957,7 +957,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64-NEXT:    sb a2, 0(a0)
 ; CHECK-RV64-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
 ; CHECK-RV64-NEXT:    addi sp, sp, 16
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 ;
 ; CHECK-RV32-LABEL: test_nontemporal_store_v16i8:
@@ -1018,7 +1021,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32-NEXT:    sb a2, 0(a0)
 ; CHECK-RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64C-LABEL: test_nontemporal_store_v16i8:
@@ -1079,7 +1085,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV64C-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64C-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64C-NEXT:    .cfi_restore s0
+; CHECK-RV64C-NEXT:    .cfi_restore s1
 ; CHECK-RV64C-NEXT:    addi sp, sp, 16
+; CHECK-RV64C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64C-NEXT:    ret
 ;
 ; CHECK-RV32C-LABEL: test_nontemporal_store_v16i8:
@@ -1140,7 +1149,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV32C-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32C-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32C-NEXT:    .cfi_restore s0
+; CHECK-RV32C-NEXT:    .cfi_restore s1
 ; CHECK-RV32C-NEXT:    addi sp, sp, 16
+; CHECK-RV32C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32C-NEXT:    ret
 ;
 ; CHECK-RV64V-LABEL: test_nontemporal_store_v16i8:
@@ -2371,7 +2383,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64-NEXT:    sb a2, 0(a0)
 ; CHECK-RV64-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
 ; CHECK-RV64-NEXT:    addi sp, sp, 16
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 ;
 ; CHECK-RV32-LABEL: test_nontemporal_P1_store_v16i8:
@@ -2432,7 +2447,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32-NEXT:    sb a2, 0(a0)
 ; CHECK-RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64C-LABEL: test_nontemporal_P1_store_v16i8:
@@ -2493,7 +2511,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV64C-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64C-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64C-NEXT:    .cfi_restore s0
+; CHECK-RV64C-NEXT:    .cfi_restore s1
 ; CHECK-RV64C-NEXT:    addi sp, sp, 16
+; CHECK-RV64C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64C-NEXT:    ret
 ;
 ; CHECK-RV32C-LABEL: test_nontemporal_P1_store_v16i8:
@@ -2554,7 +2575,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV32C-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32C-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32C-NEXT:    .cfi_restore s0
+; CHECK-RV32C-NEXT:    .cfi_restore s1
 ; CHECK-RV32C-NEXT:    addi sp, sp, 16
+; CHECK-RV32C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32C-NEXT:    ret
 ;
 ; CHECK-RV64V-LABEL: test_nontemporal_P1_store_v16i8:
@@ -3785,7 +3809,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64-NEXT:    sb a2, 0(a0)
 ; CHECK-RV64-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
 ; CHECK-RV64-NEXT:    addi sp, sp, 16
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 ;
 ; CHECK-RV32-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -3846,7 +3873,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32-NEXT:    sb a2, 0(a0)
 ; CHECK-RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64C-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -3907,7 +3937,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV64C-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64C-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64C-NEXT:    .cfi_restore s0
+; CHECK-RV64C-NEXT:    .cfi_restore s1
 ; CHECK-RV64C-NEXT:    addi sp, sp, 16
+; CHECK-RV64C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64C-NEXT:    ret
 ;
 ; CHECK-RV32C-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -3968,7 +4001,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV32C-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32C-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32C-NEXT:    .cfi_restore s0
+; CHECK-RV32C-NEXT:    .cfi_restore s1
 ; CHECK-RV32C-NEXT:    addi sp, sp, 16
+; CHECK-RV32C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32C-NEXT:    ret
 ;
 ; CHECK-RV64V-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -5199,7 +5235,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64-NEXT:    sb a2, 0(a0)
 ; CHECK-RV64-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
 ; CHECK-RV64-NEXT:    addi sp, sp, 16
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 ;
 ; CHECK-RV32-LABEL: test_nontemporal_S1_store_v16i8:
@@ -5260,7 +5299,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32-NEXT:    sb a2, 0(a0)
 ; CHECK-RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64C-LABEL: test_nontemporal_S1_store_v16i8:
@@ -5321,7 +5363,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV64C-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64C-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64C-NEXT:    .cfi_restore s0
+; CHECK-RV64C-NEXT:    .cfi_restore s1
 ; CHECK-RV64C-NEXT:    addi sp, sp, 16
+; CHECK-RV64C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64C-NEXT:    ret
 ;
 ; CHECK-RV32C-LABEL: test_nontemporal_S1_store_v16i8:
@@ -5382,7 +5427,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV32C-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32C-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32C-NEXT:    .cfi_restore s0
+; CHECK-RV32C-NEXT:    .cfi_restore s1
 ; CHECK-RV32C-NEXT:    addi sp, sp, 16
+; CHECK-RV32C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32C-NEXT:    ret
 ;
 ; CHECK-RV64V-LABEL: test_nontemporal_S1_store_v16i8:
@@ -6613,7 +6661,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64-NEXT:    sb a2, 0(a0)
 ; CHECK-RV64-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    .cfi_restore s0
+; CHECK-RV64-NEXT:    .cfi_restore s1
 ; CHECK-RV64-NEXT:    addi sp, sp, 16
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 ;
 ; CHECK-RV32-LABEL: test_nontemporal_ALL_store_v16i8:
@@ -6674,7 +6725,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32-NEXT:    sb a2, 0(a0)
 ; CHECK-RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    .cfi_restore s0
+; CHECK-RV32-NEXT:    .cfi_restore s1
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64C-LABEL: test_nontemporal_ALL_store_v16i8:
@@ -6735,7 +6789,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV64C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV64C-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64C-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64C-NEXT:    .cfi_restore s0
+; CHECK-RV64C-NEXT:    .cfi_restore s1
 ; CHECK-RV64C-NEXT:    addi sp, sp, 16
+; CHECK-RV64C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64C-NEXT:    ret
 ;
 ; CHECK-RV32C-LABEL: test_nontemporal_ALL_store_v16i8:
@@ -6796,7 +6853,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
 ; CHECK-RV32C-NEXT:    sb a6, 0(a0)
 ; CHECK-RV32C-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32C-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32C-NEXT:    .cfi_restore s0
+; CHECK-RV32C-NEXT:    .cfi_restore s1
 ; CHECK-RV32C-NEXT:    addi sp, sp, 16
+; CHECK-RV32C-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32C-NEXT:    ret
 ;
 ; CHECK-RV64V-LABEL: test_nontemporal_ALL_store_v16i8:
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
index fe602b5b8fc2bc..4c40d7823b3a04 100644
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -457,7 +457,11 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo6_xor_multi_use:
@@ -478,7 +482,10 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
 ; RV64-NEXT:    mv a0, s0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %x = xor i64 -1, %a
   %cmp = icmp ult i64 %x, %b
@@ -1117,7 +1124,16 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
 ; RV32-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
+; RV32-NEXT:    .cfi_restore s2
+; RV32-NEXT:    .cfi_restore s3
+; RV32-NEXT:    .cfi_restore s4
+; RV32-NEXT:    .cfi_restore s5
+; RV32-NEXT:    .cfi_restore s6
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usubo_ult_cmp_dominates_i64:
@@ -1161,7 +1177,14 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
 ; RV64-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s1
+; RV64-NEXT:    .cfi_restore s2
+; RV64-NEXT:    .cfi_restore s3
+; RV64-NEXT:    .cfi_restore s4
 ; RV64-NEXT:    addi sp, sp, 48
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   br i1 %cond, label %t, label %f
diff --git a/llvm/test/CodeGen/RISCV/pr58025.ll b/llvm/test/CodeGen/RISCV/pr58025.ll
index 3cac2968b03d90..0031bd677c2fb7 100644
--- a/llvm/test/CodeGen/RISCV/pr58025.ll
+++ b/llvm/test/CodeGen/RISCV/pr58025.ll
@@ -9,6 +9,7 @@ define void @f() {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    sw a0, 12(sp)
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 BB:
   %B = fdiv <1 x float> <float 0.5>, <float 0.5>
diff --git a/llvm/test/CodeGen/RISCV/pr58286.ll b/llvm/test/CodeGen/RISCV/pr58286.ll
index d259baee3c044e..5e146b4b12ca21 100644
--- a/llvm/test/CodeGen/RISCV/pr58286.ll
+++ b/llvm/test/CodeGen/RISCV/pr58286.ll
@@ -47,6 +47,7 @@ define void @func() {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, 16
 ; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: func:
@@ -92,6 +93,7 @@ define void @func() {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, 16
 ; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %space = alloca i32, align 4
  %stackspace = alloca [1024 x i32], align 4
@@ -180,6 +182,7 @@ define void @shrink_wrap(i1 %c) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, 16
 ; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:  .LBB1_2: # %foo
 ; RV64I-NEXT:    ret
 ;
@@ -229,6 +232,7 @@ define void @shrink_wrap(i1 %c) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, 16
 ; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:  .LBB1_2: # %foo
 ; RV32I-NEXT:    ret
   %space = alloca i32, align 4
diff --git a/llvm/test/CodeGen/RISCV/pr63365.ll b/llvm/test/CodeGen/RISCV/pr63365.ll
index 3c7071a315b7af..09f1b3e53edfe1 100644
--- a/llvm/test/CodeGen/RISCV/pr63365.ll
+++ b/llvm/test/CodeGen/RISCV/pr63365.ll
@@ -14,6 +14,7 @@ define void @f() {
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 BB:
   %A1 = alloca ptr, align 8
diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll
index 7084c04805be72..69746e3e70bfcf 100644
--- a/llvm/test/CodeGen/RISCV/pr69586.ll
+++ b/llvm/test/CodeGen/RISCV/pr69586.ll
@@ -763,6 +763,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; NOREMAT-NEXT:    li a1, 6
 ; NOREMAT-NEXT:    mul a0, a0, a1
 ; NOREMAT-NEXT:    add sp, sp, a0
+; NOREMAT-NEXT:    .cfi_def_cfa sp, 400
 ; NOREMAT-NEXT:    ld ra, 392(sp) # 8-byte Folded Reload
 ; NOREMAT-NEXT:    ld s0, 384(sp) # 8-byte Folded Reload
 ; NOREMAT-NEXT:    ld s1, 376(sp) # 8-byte Folded Reload
@@ -776,7 +777,21 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; NOREMAT-NEXT:    ld s9, 312(sp) # 8-byte Folded Reload
 ; NOREMAT-NEXT:    ld s10, 304(sp) # 8-byte Folded Reload
 ; NOREMAT-NEXT:    ld s11, 296(sp) # 8-byte Folded Reload
+; NOREMAT-NEXT:    .cfi_restore ra
+; NOREMAT-NEXT:    .cfi_restore s0
+; NOREMAT-NEXT:    .cfi_restore s1
+; NOREMAT-NEXT:    .cfi_restore s2
+; NOREMAT-NEXT:    .cfi_restore s3
+; NOREMAT-NEXT:    .cfi_restore s4
+; NOREMAT-NEXT:    .cfi_restore s5
+; NOREMAT-NEXT:    .cfi_restore s6
+; NOREMAT-NEXT:    .cfi_restore s7
+; NOREMAT-NEXT:    .cfi_restore s8
+; NOREMAT-NEXT:    .cfi_restore s9
+; NOREMAT-NEXT:    .cfi_restore s10
+; NOREMAT-NEXT:    .cfi_restore s11
 ; NOREMAT-NEXT:    addi sp, sp, 400
+; NOREMAT-NEXT:    .cfi_def_cfa_offset 0
 ; NOREMAT-NEXT:    ret
 ;
 ; REMAT-LABEL: test:
@@ -1533,7 +1548,21 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
 ; REMAT-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
 ; REMAT-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; REMAT-NEXT:    .cfi_restore ra
+; REMAT-NEXT:    .cfi_restore s0
+; REMAT-NEXT:    .cfi_restore s1
+; REMAT-NEXT:    .cfi_restore s2
+; REMAT-NEXT:    .cfi_restore s3
+; REMAT-NEXT:    .cfi_restore s4
+; REMAT-NEXT:    .cfi_restore s5
+; REMAT-NEXT:    .cfi_restore s6
+; REMAT-NEXT:    .cfi_restore s7
+; REMAT-NEXT:    .cfi_restore s8
+; REMAT-NEXT:    .cfi_restore s9
+; REMAT-NEXT:    .cfi_restore s10
+; REMAT-NEXT:    .cfi_restore s11
 ; REMAT-NEXT:    addi sp, sp, 112
+; REMAT-NEXT:    .cfi_def_cfa_offset 0
 ; REMAT-NEXT:    ret
   %4 = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 2, i64 1)
   %5 = tail call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> poison, ptr %0, i64 %4)
diff --git a/llvm/test/CodeGen/RISCV/pr88365.ll b/llvm/test/CodeGen/RISCV/pr88365.ll
index 4e4dead98ee69e..c569a91c1db4f2 100644
--- a/llvm/test/CodeGen/RISCV/pr88365.ll
+++ b/llvm/test/CodeGen/RISCV/pr88365.ll
@@ -15,8 +15,11 @@ define void @foo() {
 ; CHECK-NEXT:    call use
 ; CHECK-NEXT:    li a0, -2048
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa_offset 2032
 ; CHECK-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 2032
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %1 = alloca [1073741818 x i32], align 4
   call void @use(ptr %1)
diff --git a/llvm/test/CodeGen/RISCV/prolog-epilogue.ll b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
index 50b236470ae644..18cfa7233e4f74 100644
--- a/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
+++ b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
@@ -20,7 +20,9 @@ define void @frame_16b() {
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_16b:
@@ -32,7 +34,9 @@ define void @frame_16b() {
 ; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   call void @callee(ptr null)
   ret void
@@ -48,7 +52,9 @@ define void @frame_1024b() {
 ; RV32-NEXT:    addi a0, sp, 12
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    lw ra, 1020(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 1024
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_1024b:
@@ -60,7 +66,9 @@ define void @frame_1024b() {
 ; RV64-NEXT:    addi a0, sp, 8
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    ld ra, 1016(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 1024
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = alloca [1008 x i8]
   call void @callee(ptr %a)
@@ -79,8 +87,11 @@ define void @frame_2048b() {
 ; RV32-NEXT:    addi a0, sp, 12
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_2048b:
@@ -94,8 +105,11 @@ define void @frame_2048b() {
 ; RV64-NEXT:    addi a0, sp, 8
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = alloca [2032 x i8]
   call void @callee(ptr %a)
@@ -116,8 +130,11 @@ define void @frame_4096b() {
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    addi sp, sp, 2032
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_4096b:
@@ -133,8 +150,11 @@ define void @frame_4096b() {
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    addi sp, sp, 2032
 ; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = alloca [4080 x i8]
   call void @callee(ptr %a)
@@ -156,8 +176,11 @@ define void @frame_4kb() {
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_4kb:
@@ -173,8 +196,11 @@ define void @frame_4kb() {
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = alloca [6112 x i8]
   call void @callee(ptr %a)
@@ -197,8 +223,11 @@ define void @frame_4kb_offset_128() {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: frame_4kb_offset_128:
@@ -214,8 +243,11 @@ define void @frame_4kb_offset_128() {
 ; RV32ZBA-NEXT:    call callee
 ; RV32ZBA-NEXT:    li a0, 528
 ; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore ra
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64I-LABEL: frame_4kb_offset_128:
@@ -233,8 +265,11 @@ define void @frame_4kb_offset_128() {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, 128
 ; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: frame_4kb_offset_128:
@@ -250,8 +285,11 @@ define void @frame_4kb_offset_128() {
 ; RV64ZBA-NEXT:    call callee
 ; RV64ZBA-NEXT:    li a0, 528
 ; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    .cfi_restore ra
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT:    ret
   %a = alloca [6240 x i8]
   call void @callee(ptr %a)
@@ -274,8 +312,11 @@ define void @frame_8kb() {
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    lui a0, 2
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_8kb:
@@ -291,8 +332,11 @@ define void @frame_8kb() {
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    lui a0, 2
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = alloca [10208 x i8]
   call void @callee(ptr %a)
@@ -315,8 +359,11 @@ define void @frame_8kb_offset_128() {
 ; RV32I-NEXT:    lui a0, 2
 ; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: frame_8kb_offset_128:
@@ -332,8 +379,11 @@ define void @frame_8kb_offset_128() {
 ; RV32ZBA-NEXT:    call callee
 ; RV32ZBA-NEXT:    li a0, 1040
 ; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore ra
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64I-LABEL: frame_8kb_offset_128:
@@ -351,8 +401,11 @@ define void @frame_8kb_offset_128() {
 ; RV64I-NEXT:    lui a0, 2
 ; RV64I-NEXT:    addiw a0, a0, 128
 ; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: frame_8kb_offset_128:
@@ -368,8 +421,11 @@ define void @frame_8kb_offset_128() {
 ; RV64ZBA-NEXT:    call callee
 ; RV64ZBA-NEXT:    li a0, 1040
 ; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    .cfi_restore ra
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT:    ret
   %a = alloca [10336 x i8]
   call void @callee(ptr %a)
@@ -392,8 +448,11 @@ define void @frame_16kb_minus_80() {
 ; RV32I-NEXT:    lui a0, 4
 ; RV32I-NEXT:    addi a0, a0, -80
 ; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: frame_16kb_minus_80:
@@ -409,8 +468,11 @@ define void @frame_16kb_minus_80() {
 ; RV32ZBA-NEXT:    call callee
 ; RV32ZBA-NEXT:    li a0, 2038
 ; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore ra
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64I-LABEL: frame_16kb_minus_80:
@@ -428,8 +490,11 @@ define void @frame_16kb_minus_80() {
 ; RV64I-NEXT:    lui a0, 4
 ; RV64I-NEXT:    addiw a0, a0, -80
 ; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: frame_16kb_minus_80:
@@ -445,8 +510,11 @@ define void @frame_16kb_minus_80() {
 ; RV64ZBA-NEXT:    call callee
 ; RV64ZBA-NEXT:    li a0, 2038
 ; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    .cfi_restore ra
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT:    ret
   %a = alloca [18320 x i8]
   call void @callee(ptr %a)
@@ -468,8 +536,11 @@ define void @frame_16kb() {
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    lui a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_16kb:
@@ -485,8 +556,11 @@ define void @frame_16kb() {
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    lui a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = alloca [18400 x i8]
   call void @callee(ptr %a)
@@ -508,8 +582,11 @@ define void @frame_32kb() {
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    lui a0, 8
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: frame_32kb:
@@ -525,8 +602,11 @@ define void @frame_32kb() {
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    lui a0, 8
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = alloca [34784 x i8]
   call void @callee(ptr %a)
diff --git a/llvm/test/CodeGen/RISCV/push-pop-opt-crash.ll b/llvm/test/CodeGen/RISCV/push-pop-opt-crash.ll
index 5edf3cf49e25dd..e52d0a3855a084 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-opt-crash.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-opt-crash.ll
@@ -22,6 +22,8 @@ define  dso_local void @f0() local_unnamed_addr {
 ; RV32IZCMP-NEXT: 	.cfi_offset ra, -4
 ; RV32IZCMP-NEXT: 	call	f1
 ; RV32IZCMP-NEXT: 	cm.pop	{ra}, 16
+; RV32IZCMP-NEXT: 	.cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT: 	.cfi_restore ra
 ; RV32IZCMP-NEXT: .LBB0_2:                                # %if.F
 ; RV32IZCMP-NEXT: 	tail	f2
 ; RV32IZCMP-NEXT: .Lfunc_end0:
@@ -36,6 +38,8 @@ define  dso_local void @f0() local_unnamed_addr {
 ; RV64IZCMP-NEXT: 	.cfi_offset ra, -8
 ; RV64IZCMP-NEXT: 	call	f1
 ; RV64IZCMP-NEXT: 	cm.pop	{ra}, 16
+; RV64IZCMP-NEXT: 	.cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT: 	.cfi_restore ra
 ; RV64IZCMP-NEXT: .LBB0_2:                                # %if.F
 ; RV64IZCMP-NEXT: 	tail	f2
 ; RV64IZCMP-NEXT: .Lfunc_end0:
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 85c2997e268a94..c856d9e336b34e 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -25,8 +25,13 @@ define i32 @foo() {
 ; RV32IZCMP-NEXT:    .cfi_offset ra, -4
 ; RV32IZCMP-NEXT:    mv a0, sp
 ; RV32IZCMP-NEXT:    call test
+; RV32IZCMP-NEXT:    li a0, 0
 ; RV32IZCMP-NEXT:    addi sp, sp, 464
-; RV32IZCMP-NEXT:    cm.popretz {ra}, 64
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IZCMP-NEXT:    cm.pop {ra}, 64
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: foo:
 ; RV64IZCMP:       # %bb.0:
@@ -36,8 +41,13 @@ define i32 @foo() {
 ; RV64IZCMP-NEXT:    .cfi_offset ra, -8
 ; RV64IZCMP-NEXT:    mv a0, sp
 ; RV64IZCMP-NEXT:    call test
+; RV64IZCMP-NEXT:    li a0, 0
 ; RV64IZCMP-NEXT:    addi sp, sp, 464
-; RV64IZCMP-NEXT:    cm.popretz {ra}, 64
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IZCMP-NEXT:    cm.pop {ra}, 64
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: foo:
 ; RV32IZCMP-SR:       # %bb.0:
@@ -47,8 +57,13 @@ define i32 @foo() {
 ; RV32IZCMP-SR-NEXT:    .cfi_offset ra, -4
 ; RV32IZCMP-SR-NEXT:    mv a0, sp
 ; RV32IZCMP-SR-NEXT:    call test
+; RV32IZCMP-SR-NEXT:    li a0, 0
 ; RV32IZCMP-SR-NEXT:    addi sp, sp, 464
-; RV32IZCMP-SR-NEXT:    cm.popretz {ra}, 64
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV32IZCMP-SR-NEXT:    cm.pop {ra}, 64
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: foo:
 ; RV64IZCMP-SR:       # %bb.0:
@@ -58,8 +73,13 @@ define i32 @foo() {
 ; RV64IZCMP-SR-NEXT:    .cfi_offset ra, -8
 ; RV64IZCMP-SR-NEXT:    mv a0, sp
 ; RV64IZCMP-SR-NEXT:    call test
+; RV64IZCMP-SR-NEXT:    li a0, 0
 ; RV64IZCMP-SR-NEXT:    addi sp, sp, 464
-; RV64IZCMP-SR-NEXT:    cm.popretz {ra}, 64
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV64IZCMP-SR-NEXT:    cm.pop {ra}, 64
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: foo:
 ; RV32I:       # %bb.0:
@@ -71,7 +91,9 @@ define i32 @foo() {
 ; RV32I-NEXT:    call test
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    lw ra, 524(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 528
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: foo:
@@ -84,7 +106,9 @@ define i32 @foo() {
 ; RV64I-NEXT:    call test
 ; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ld ra, 520(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 528
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = alloca [512 x i8]
   %2 = getelementptr [512 x i8], ptr %1, i32 0, i32 0
@@ -106,8 +130,14 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV32IZCMP-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-NEXT:    mv sp, a0
 ; RV32IZCMP-NEXT:    call callee_void
+; RV32IZCMP-NEXT:    li a0, 0
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
-; RV32IZCMP-NEXT:    cm.popretz {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    .cfi_restore s0
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: pushpopret0:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -124,8 +154,14 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV64IZCMP-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-NEXT:    mv sp, a0
 ; RV64IZCMP-NEXT:    call callee_void
+; RV64IZCMP-NEXT:    li a0, 0
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
-; RV64IZCMP-NEXT:    cm.popretz {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    .cfi_restore s0
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: pushpopret0:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -140,8 +176,14 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV32IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
 ; RV32IZCMP-SR-NEXT:    call callee_void
+; RV32IZCMP-SR-NEXT:    li a0, 0
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV32IZCMP-SR-NEXT:    cm.popretz {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    .cfi_restore s0
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: pushpopret0:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -158,8 +200,14 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV64IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
 ; RV64IZCMP-SR-NEXT:    call callee_void
+; RV64IZCMP-SR-NEXT:    li a0, 0
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV64IZCMP-SR-NEXT:    cm.popretz {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    .cfi_restore s0
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: pushpopret0:
 ; RV32I:       # %bb.0: # %entry
@@ -178,9 +226,13 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    addi sp, s0, -16
+; RV32I-NEXT:    .cfi_def_cfa sp, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: pushpopret0:
@@ -202,9 +254,13 @@ define i32 @pushpopret0(i32 signext %size){
 ; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    addi sp, s0, -16
+; RV64I-NEXT:    .cfi_def_cfa sp, 16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
@@ -228,7 +284,12 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    li a0, 1
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
-; RV32IZCMP-NEXT:    cm.popret {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    .cfi_restore s0
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: pushpopret1:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -247,7 +308,12 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    li a0, 1
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
-; RV64IZCMP-NEXT:    cm.popret {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    .cfi_restore s0
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: pushpopret1:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -264,7 +330,12 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    li a0, 1
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    .cfi_restore s0
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: pushpopret1:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -283,7 +354,12 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    li a0, 1
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    .cfi_restore s0
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: pushpopret1:
 ; RV32I:       # %bb.0: # %entry
@@ -302,9 +378,13 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    addi sp, s0, -16
+; RV32I-NEXT:    .cfi_def_cfa sp, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: pushpopret1:
@@ -326,9 +406,13 @@ define i32 @pushpopret1(i32 signext %size) {
 ; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    addi sp, s0, -16
+; RV64I-NEXT:    .cfi_def_cfa sp, 16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
@@ -352,7 +436,12 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    li a0, -1
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
-; RV32IZCMP-NEXT:    cm.popret {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    .cfi_restore s0
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: pushpopretneg1:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -371,7 +460,12 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    li a0, -1
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
-; RV64IZCMP-NEXT:    cm.popret {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    .cfi_restore s0
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: pushpopretneg1:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -388,7 +482,12 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    li a0, -1
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    .cfi_restore s0
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: pushpopretneg1:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -407,7 +506,12 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    li a0, -1
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    .cfi_restore s0
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: pushpopretneg1:
 ; RV32I:       # %bb.0: # %entry
@@ -426,9 +530,13 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, -1
 ; RV32I-NEXT:    addi sp, s0, -16
+; RV32I-NEXT:    .cfi_def_cfa sp, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: pushpopretneg1:
@@ -450,9 +558,13 @@ define i32 @pushpopretneg1(i32 signext %size) {
 ; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    addi sp, s0, -16
+; RV64I-NEXT:    .cfi_def_cfa sp, 16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
@@ -476,7 +588,12 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV32IZCMP-NEXT:    call callee_void
 ; RV32IZCMP-NEXT:    li a0, 2
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
-; RV32IZCMP-NEXT:    cm.popret {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    .cfi_restore s0
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: pushpopret2:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -495,7 +612,12 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV64IZCMP-NEXT:    call callee_void
 ; RV64IZCMP-NEXT:    li a0, 2
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
-; RV64IZCMP-NEXT:    cm.popret {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    .cfi_restore s0
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: pushpopret2:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -512,7 +634,12 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    call callee_void
 ; RV32IZCMP-SR-NEXT:    li a0, 2
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    .cfi_restore s0
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: pushpopret2:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -531,7 +658,12 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    call callee_void
 ; RV64IZCMP-SR-NEXT:    li a0, 2
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    .cfi_restore s0
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: pushpopret2:
 ; RV32I:       # %bb.0: # %entry
@@ -550,9 +682,13 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV32I-NEXT:    call callee_void
 ; RV32I-NEXT:    li a0, 2
 ; RV32I-NEXT:    addi sp, s0, -16
+; RV32I-NEXT:    .cfi_def_cfa sp, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: pushpopret2:
@@ -574,9 +710,13 @@ define i32 @pushpopret2(i32 signext %size) {
 ; RV64I-NEXT:    call callee_void
 ; RV64I-NEXT:    li a0, 2
 ; RV64I-NEXT:    addi sp, s0, -16
+; RV64I-NEXT:    .cfi_def_cfa sp, 16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
@@ -598,7 +738,11 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV32IZCMP-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-NEXT:    mv sp, a0
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
+; RV32IZCMP-NEXT:    .cfi_def_cfa sp, 16
 ; RV32IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    .cfi_restore s0
 ; RV32IZCMP-NEXT:    tail callee
 ;
 ; RV64IZCMP-LABEL: tailcall:
@@ -616,7 +760,11 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV64IZCMP-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-NEXT:    mv sp, a0
 ; RV64IZCMP-NEXT:    addi sp, s0, -16
+; RV64IZCMP-NEXT:    .cfi_def_cfa sp, 16
 ; RV64IZCMP-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    .cfi_restore s0
 ; RV64IZCMP-NEXT:    tail callee
 ;
 ; RV32IZCMP-SR-LABEL: tailcall:
@@ -632,7 +780,11 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV32IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV32IZCMP-SR-NEXT:    mv sp, a0
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
 ; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    .cfi_restore s0
 ; RV32IZCMP-SR-NEXT:    tail callee
 ;
 ; RV64IZCMP-SR-LABEL: tailcall:
@@ -650,7 +802,11 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV64IZCMP-SR-NEXT:    sub a0, sp, a0
 ; RV64IZCMP-SR-NEXT:    mv sp, a0
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa sp, 16
 ; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0}, 16
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    .cfi_restore s0
 ; RV64IZCMP-SR-NEXT:    tail callee
 ;
 ; RV32I-LABEL: tailcall:
@@ -668,9 +824,13 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
 ; RV32I-NEXT:    addi sp, s0, -16
+; RV32I-NEXT:    .cfi_def_cfa sp, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    tail callee
 ;
 ; RV64I-LABEL: tailcall:
@@ -690,9 +850,13 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
 ; RV64I-NEXT:    addi sp, s0, -16
+; RV64I-NEXT:    .cfi_def_cfa sp, 16
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    tail callee
 entry:
   %0 = alloca i8, i32 %size, align 16
@@ -738,7 +902,19 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32IZCMP-NEXT:    sw s3, %lo(var)(s1)
 ; RV32IZCMP-NEXT:    mv a0, s2
 ; RV32IZCMP-NEXT:    addi sp, s0, -48
+; RV32IZCMP-NEXT:    .cfi_def_cfa sp, 48
 ; RV32IZCMP-NEXT:    cm.pop {ra, s0-s8}, 48
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    .cfi_restore s0
+; RV32IZCMP-NEXT:    .cfi_restore s1
+; RV32IZCMP-NEXT:    .cfi_restore s2
+; RV32IZCMP-NEXT:    .cfi_restore s3
+; RV32IZCMP-NEXT:    .cfi_restore s4
+; RV32IZCMP-NEXT:    .cfi_restore s5
+; RV32IZCMP-NEXT:    .cfi_restore s6
+; RV32IZCMP-NEXT:    .cfi_restore s7
+; RV32IZCMP-NEXT:    .cfi_restore s8
 ; RV32IZCMP-NEXT:    tail callee
 ;
 ; RV64IZCMP-LABEL: nocompress:
@@ -779,7 +955,19 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64IZCMP-NEXT:    sw s3, %lo(var)(s1)
 ; RV64IZCMP-NEXT:    mv a0, s2
 ; RV64IZCMP-NEXT:    addi sp, s0, -80
+; RV64IZCMP-NEXT:    .cfi_def_cfa sp, 80
 ; RV64IZCMP-NEXT:    cm.pop {ra, s0-s8}, 80
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    .cfi_restore s0
+; RV64IZCMP-NEXT:    .cfi_restore s1
+; RV64IZCMP-NEXT:    .cfi_restore s2
+; RV64IZCMP-NEXT:    .cfi_restore s3
+; RV64IZCMP-NEXT:    .cfi_restore s4
+; RV64IZCMP-NEXT:    .cfi_restore s5
+; RV64IZCMP-NEXT:    .cfi_restore s6
+; RV64IZCMP-NEXT:    .cfi_restore s7
+; RV64IZCMP-NEXT:    .cfi_restore s8
 ; RV64IZCMP-NEXT:    tail callee
 ;
 ; RV32IZCMP-SR-LABEL: nocompress:
@@ -818,7 +1006,19 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32IZCMP-SR-NEXT:    sw s3, %lo(var)(s1)
 ; RV32IZCMP-SR-NEXT:    mv a0, s2
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -48
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa sp, 48
 ; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0-s8}, 48
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    .cfi_restore s0
+; RV32IZCMP-SR-NEXT:    .cfi_restore s1
+; RV32IZCMP-SR-NEXT:    .cfi_restore s2
+; RV32IZCMP-SR-NEXT:    .cfi_restore s3
+; RV32IZCMP-SR-NEXT:    .cfi_restore s4
+; RV32IZCMP-SR-NEXT:    .cfi_restore s5
+; RV32IZCMP-SR-NEXT:    .cfi_restore s6
+; RV32IZCMP-SR-NEXT:    .cfi_restore s7
+; RV32IZCMP-SR-NEXT:    .cfi_restore s8
 ; RV32IZCMP-SR-NEXT:    tail callee
 ;
 ; RV64IZCMP-SR-LABEL: nocompress:
@@ -859,7 +1059,19 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64IZCMP-SR-NEXT:    sw s3, %lo(var)(s1)
 ; RV64IZCMP-SR-NEXT:    mv a0, s2
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -80
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa sp, 80
 ; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0-s8}, 80
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    .cfi_restore s0
+; RV64IZCMP-SR-NEXT:    .cfi_restore s1
+; RV64IZCMP-SR-NEXT:    .cfi_restore s2
+; RV64IZCMP-SR-NEXT:    .cfi_restore s3
+; RV64IZCMP-SR-NEXT:    .cfi_restore s4
+; RV64IZCMP-SR-NEXT:    .cfi_restore s5
+; RV64IZCMP-SR-NEXT:    .cfi_restore s6
+; RV64IZCMP-SR-NEXT:    .cfi_restore s7
+; RV64IZCMP-SR-NEXT:    .cfi_restore s8
 ; RV64IZCMP-SR-NEXT:    tail callee
 ;
 ; RV32I-LABEL: nocompress:
@@ -908,6 +1120,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32I-NEXT:    sw s3, %lo(var)(s2)
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    addi sp, s0, -48
+; RV32I-NEXT:    .cfi_def_cfa sp, 48
 ; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
@@ -918,7 +1131,18 @@ define i32 @nocompress(i32 signext %size) {
 ; RV32I-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
+; RV32I-NEXT:    .cfi_restore s2
+; RV32I-NEXT:    .cfi_restore s3
+; RV32I-NEXT:    .cfi_restore s4
+; RV32I-NEXT:    .cfi_restore s5
+; RV32I-NEXT:    .cfi_restore s6
+; RV32I-NEXT:    .cfi_restore s7
+; RV32I-NEXT:    .cfi_restore s8
 ; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    tail callee
 ;
 ; RV64I-LABEL: nocompress:
@@ -969,6 +1193,7 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64I-NEXT:    sw s3, %lo(var)(s2)
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    addi sp, s0, -80
+; RV64I-NEXT:    .cfi_def_cfa sp, 80
 ; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
@@ -979,7 +1204,18 @@ define i32 @nocompress(i32 signext %size) {
 ; RV64I-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s7, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s8, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
+; RV64I-NEXT:    .cfi_restore s2
+; RV64I-NEXT:    .cfi_restore s3
+; RV64I-NEXT:    .cfi_restore s4
+; RV64I-NEXT:    .cfi_restore s5
+; RV64I-NEXT:    .cfi_restore s6
+; RV64I-NEXT:    .cfi_restore s7
+; RV64I-NEXT:    .cfi_restore s8
 ; RV64I-NEXT:    addi sp, sp, 80
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    tail callee
 entry:
   %0 = alloca i8, i32 %size, align 16
@@ -1145,7 +1381,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
 ; RV32IZCMP-NEXT:    sw t0, %lo(var0+8)(a0)
 ; RV32IZCMP-NEXT:    sw a7, %lo(var0+4)(a0)
 ; RV32IZCMP-NEXT:    sw a6, %lo(var0)(a0)
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s4}, 32
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s4}, 32
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: many_args:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -1188,7 +1425,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
 ; RV64IZCMP-NEXT:    sw t0, %lo(var0+8)(a0)
 ; RV64IZCMP-NEXT:    sw a7, %lo(var0+4)(a0)
 ; RV64IZCMP-NEXT:    sw a6, %lo(var0)(a0)
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s4}, 48
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s4}, 48
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: many_args:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -1231,7 +1469,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
 ; RV32IZCMP-SR-NEXT:    sw t0, %lo(var0+8)(a0)
 ; RV32IZCMP-SR-NEXT:    sw a7, %lo(var0+4)(a0)
 ; RV32IZCMP-SR-NEXT:    sw a6, %lo(var0)(a0)
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0-s4}, 32
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0-s4}, 32
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: many_args:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -1274,7 +1513,8 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
 ; RV64IZCMP-SR-NEXT:    sw t0, %lo(var0+8)(a0)
 ; RV64IZCMP-SR-NEXT:    sw a7, %lo(var0+4)(a0)
 ; RV64IZCMP-SR-NEXT:    sw a6, %lo(var0)(a0)
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0-s4}, 48
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0-s4}, 48
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: many_args:
 ; RV32I:       # %bb.0: # %entry
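Note how the Zcmp configurations above now end in cm.pop followed by a separate ret where they previously used the fused cm.popret, even in nounwind functions such as many_args: popret combines the register pops, the stack adjustment, and the return in one instruction, leaving no program point where epilogue CFI could sit between the deallocation and the return, so the lowering presumably always splits the two. Schematically, with the operands from the RV32IZCMP lines above:

  # before: pops, sp adjustment, and return fused; no room for CFI
  cm.popret {ra, s0-s4}, 32

  # after: the return is a separate instruction, so directives can be
  # interleaved in functions that need them
  cm.pop {ra, s0-s4}, 32
  ret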
@@ -1408,7 +1648,8 @@ define void @alloca(i32 %n) nounwind {
 ; RV32IZCMP-NEXT:    call notdead
 ; RV32IZCMP-NEXT:    mv sp, s1
 ; RV32IZCMP-NEXT:    addi sp, s0, -16
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s1}, 16
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s1}, 16
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: alloca:
 ; RV64IZCMP:       # %bb.0:
@@ -1424,7 +1665,8 @@ define void @alloca(i32 %n) nounwind {
 ; RV64IZCMP-NEXT:    call notdead
 ; RV64IZCMP-NEXT:    mv sp, s1
 ; RV64IZCMP-NEXT:    addi sp, s0, -32
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s1}, 32
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s1}, 32
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: alloca:
 ; RV32IZCMP-SR:       # %bb.0:
@@ -1438,7 +1680,8 @@ define void @alloca(i32 %n) nounwind {
 ; RV32IZCMP-SR-NEXT:    call notdead
 ; RV32IZCMP-SR-NEXT:    mv sp, s1
 ; RV32IZCMP-SR-NEXT:    addi sp, s0, -16
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 16
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0-s1}, 16
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: alloca:
 ; RV64IZCMP-SR:       # %bb.0:
@@ -1454,7 +1697,8 @@ define void @alloca(i32 %n) nounwind {
 ; RV64IZCMP-SR-NEXT:    call notdead
 ; RV64IZCMP-SR-NEXT:    mv sp, s1
 ; RV64IZCMP-SR-NEXT:    addi sp, s0, -32
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 32
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0-s1}, 32
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: alloca:
 ; RV32I:       # %bb.0:
@@ -1752,25 +1996,29 @@ define void @foo_no_irq() nounwind{
 ; RV32IZCMP:       # %bb.0:
 ; RV32IZCMP-NEXT:    cm.push {ra}, -16
 ; RV32IZCMP-NEXT:    call foo_test_irq
-; RV32IZCMP-NEXT:    cm.popret {ra}, 16
+; RV32IZCMP-NEXT:    cm.pop {ra}, 16
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: foo_no_irq:
 ; RV64IZCMP:       # %bb.0:
 ; RV64IZCMP-NEXT:    cm.push {ra}, -16
 ; RV64IZCMP-NEXT:    call foo_test_irq
-; RV64IZCMP-NEXT:    cm.popret {ra}, 16
+; RV64IZCMP-NEXT:    cm.pop {ra}, 16
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: foo_no_irq:
 ; RV32IZCMP-SR:       # %bb.0:
 ; RV32IZCMP-SR-NEXT:    cm.push {ra}, -16
 ; RV32IZCMP-SR-NEXT:    call foo_test_irq
-; RV32IZCMP-SR-NEXT:    cm.popret {ra}, 16
+; RV32IZCMP-SR-NEXT:    cm.pop {ra}, 16
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: foo_no_irq:
 ; RV64IZCMP-SR:       # %bb.0:
 ; RV64IZCMP-SR-NEXT:    cm.push {ra}, -16
 ; RV64IZCMP-SR-NEXT:    call foo_test_irq
-; RV64IZCMP-SR-NEXT:    cm.popret {ra}, 16
+; RV64IZCMP-SR-NEXT:    cm.pop {ra}, 16
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: foo_no_irq:
 ; RV32I:       # %bb.0:
@@ -2624,7 +2872,8 @@ define void @callee_no_irq() nounwind{
 ; RV32IZCMP-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
 ; RV32IZCMP-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
 ; RV32IZCMP-NEXT:    sw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s11}, 96
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s11}, 96
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: callee_no_irq:
 ; RV64IZCMP:       # %bb.0:
@@ -2707,7 +2956,8 @@ define void @callee_no_irq() nounwind{
 ; RV64IZCMP-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
 ; RV64IZCMP-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
 ; RV64IZCMP-NEXT:    sw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s11}, 160
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: callee_no_irq:
 ; RV32IZCMP-SR:       # %bb.0:
@@ -2790,7 +3040,8 @@ define void @callee_no_irq() nounwind{
 ; RV32IZCMP-SR-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
 ; RV32IZCMP-SR-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
 ; RV32IZCMP-SR-NEXT:    sw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0-s11}, 96
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0-s11}, 96
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: callee_no_irq:
 ; RV64IZCMP-SR:       # %bb.0:
@@ -2873,7 +3124,8 @@ define void @callee_no_irq() nounwind{
 ; RV64IZCMP-SR-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
 ; RV64IZCMP-SR-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
 ; RV64IZCMP-SR-NEXT:    sw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0-s11}, 160
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: callee_no_irq:
 ; RV32I:       # %bb.0:
@@ -3117,7 +3369,12 @@ define i32 @use_fp(i32 %x) {
 ; RV32IZCMP-NEXT:    mv a0, s0
 ; RV32IZCMP-NEXT:    call bar
 ; RV32IZCMP-NEXT:    mv a0, s1
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s1}, 32
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s1}, 32
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore ra
+; RV32IZCMP-NEXT:    .cfi_restore s0
+; RV32IZCMP-NEXT:    .cfi_restore s1
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: use_fp:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -3133,7 +3390,12 @@ define i32 @use_fp(i32 %x) {
 ; RV64IZCMP-NEXT:    mv a0, s0
 ; RV64IZCMP-NEXT:    call bar
 ; RV64IZCMP-NEXT:    mv a0, s1
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s1}, 48
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s1}, 48
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore ra
+; RV64IZCMP-NEXT:    .cfi_restore s0
+; RV64IZCMP-NEXT:    .cfi_restore s1
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: use_fp:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -3149,7 +3411,12 @@ define i32 @use_fp(i32 %x) {
 ; RV32IZCMP-SR-NEXT:    mv a0, s0
 ; RV32IZCMP-SR-NEXT:    call bar
 ; RV32IZCMP-SR-NEXT:    mv a0, s1
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 32
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0-s1}, 32
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore ra
+; RV32IZCMP-SR-NEXT:    .cfi_restore s0
+; RV32IZCMP-SR-NEXT:    .cfi_restore s1
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: use_fp:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -3165,7 +3432,12 @@ define i32 @use_fp(i32 %x) {
 ; RV64IZCMP-SR-NEXT:    mv a0, s0
 ; RV64IZCMP-SR-NEXT:    call bar
 ; RV64IZCMP-SR-NEXT:    mv a0, s1
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0-s1}, 48
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0-s1}, 48
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore ra
+; RV64IZCMP-SR-NEXT:    .cfi_restore s0
+; RV64IZCMP-SR-NEXT:    .cfi_restore s1
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: use_fp:
 ; RV32I:       # %bb.0: # %entry
@@ -3187,7 +3459,11 @@ define i32 @use_fp(i32 %x) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: use_fp:
@@ -3210,7 +3486,11 @@ define i32 @use_fp(i32 %x) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 entry:
   %var = alloca i32, align 4
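For cm.pop, all of the directives trail the instruction, with .cfi_def_cfa_offset 0 first and the per-register .cfi_restore lines after it, since the one instruction both reloads the registers and releases the frame, leaving no intermediate point to describe. A sketch using the use_fp operands above:

  cm.pop {ra, s0-s1}, 32   # reloads ra/s0/s1 and adds 32 to sp at once
  .cfi_def_cfa_offset 0    # frame released: CFA coincides with sp
  .cfi_restore ra          # the popped registers hold their entry values
  .cfi_restore s0
  .cfi_restore s1
  ret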
@@ -3229,7 +3509,11 @@ define void @spill_x10() {
 ; RV32IZCMP-NEXT:    #APP
 ; RV32IZCMP-NEXT:    li s10, 0
 ; RV32IZCMP-NEXT:    #NO_APP
-; RV32IZCMP-NEXT:    cm.popret {ra, s0-s11}, 64
+; RV32IZCMP-NEXT:    cm.pop {ra, s0-s11}, 64
+; RV32IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-NEXT:    .cfi_restore s10
+; RV32IZCMP-NEXT:    .cfi_restore s11
+; RV32IZCMP-NEXT:    ret
 ;
 ; RV64IZCMP-LABEL: spill_x10:
 ; RV64IZCMP:       # %bb.0: # %entry
@@ -3240,7 +3524,11 @@ define void @spill_x10() {
 ; RV64IZCMP-NEXT:    #APP
 ; RV64IZCMP-NEXT:    li s10, 0
 ; RV64IZCMP-NEXT:    #NO_APP
-; RV64IZCMP-NEXT:    cm.popret {ra, s0-s11}, 112
+; RV64IZCMP-NEXT:    cm.pop {ra, s0-s11}, 112
+; RV64IZCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-NEXT:    .cfi_restore s10
+; RV64IZCMP-NEXT:    .cfi_restore s11
+; RV64IZCMP-NEXT:    ret
 ;
 ; RV32IZCMP-SR-LABEL: spill_x10:
 ; RV32IZCMP-SR:       # %bb.0: # %entry
@@ -3251,7 +3539,11 @@ define void @spill_x10() {
 ; RV32IZCMP-SR-NEXT:    #APP
 ; RV32IZCMP-SR-NEXT:    li s10, 0
 ; RV32IZCMP-SR-NEXT:    #NO_APP
-; RV32IZCMP-SR-NEXT:    cm.popret {ra, s0-s11}, 64
+; RV32IZCMP-SR-NEXT:    cm.pop {ra, s0-s11}, 64
+; RV32IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IZCMP-SR-NEXT:    .cfi_restore s10
+; RV32IZCMP-SR-NEXT:    .cfi_restore s11
+; RV32IZCMP-SR-NEXT:    ret
 ;
 ; RV64IZCMP-SR-LABEL: spill_x10:
 ; RV64IZCMP-SR:       # %bb.0: # %entry
@@ -3262,7 +3554,11 @@ define void @spill_x10() {
 ; RV64IZCMP-SR-NEXT:    #APP
 ; RV64IZCMP-SR-NEXT:    li s10, 0
 ; RV64IZCMP-SR-NEXT:    #NO_APP
-; RV64IZCMP-SR-NEXT:    cm.popret {ra, s0-s11}, 112
+; RV64IZCMP-SR-NEXT:    cm.pop {ra, s0-s11}, 112
+; RV64IZCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IZCMP-SR-NEXT:    .cfi_restore s10
+; RV64IZCMP-SR-NEXT:    .cfi_restore s11
+; RV64IZCMP-SR-NEXT:    ret
 ;
 ; RV32I-LABEL: spill_x10:
 ; RV32I:       # %bb.0: # %entry
@@ -3274,7 +3570,9 @@ define void @spill_x10() {
 ; RV32I-NEXT:    li s10, 0
 ; RV32I-NEXT:    #NO_APP
 ; RV32I-NEXT:    lw s10, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore s10
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: spill_x10:
@@ -3287,7 +3585,9 @@ define void @spill_x10() {
 ; RV64I-NEXT:    li s10, 0
 ; RV64I-NEXT:    #NO_APP
 ; RV64I-NEXT:    ld s10, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore s10
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 entry:
   tail call void asm sideeffect "li s10, 0", "~{s10}"()
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 6a0dbbe356a165..dd270fa12183ed 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -62,9 +62,13 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 32
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 32
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ;
 ; SUBREGLIVENESS-LABEL: last_chance_recoloring_failure:
@@ -118,9 +122,13 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT:    csrr a0, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a0, a0, 4
 ; SUBREGLIVENESS-NEXT:    add sp, sp, a0
+; SUBREGLIVENESS-NEXT:    .cfi_def_cfa sp, 32
 ; SUBREGLIVENESS-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; SUBREGLIVENESS-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; SUBREGLIVENESS-NEXT:    .cfi_restore ra
+; SUBREGLIVENESS-NEXT:    .cfi_restore s0
 ; SUBREGLIVENESS-NEXT:    addi sp, sp, 32
+; SUBREGLIVENESS-NEXT:    .cfi_def_cfa_offset 0
 ; SUBREGLIVENESS-NEXT:    ret
 entry:
   %i = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr nonnull poison, <vscale x 16 x i32> poison, i64 55, i64 4)
diff --git a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
index adf5f9863b79f8..962530e454db77 100644
--- a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
@@ -35,7 +35,10 @@ define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
 ; CHECK-NEXT:    mv a0, s1
 ; CHECK-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %resolveCall2 = inttoptr i64 244837814094590 to i8*
diff --git a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll
index 2fa344d4d79a7f..1a0be244c824c2 100644
--- a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll
@@ -25,7 +25,9 @@ define i1 @test_i1_return() gc "statepoint-example" {
 ; CHECK-NEXT:    call return_i1
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; This is just checking that an i1 gets lowered normally when there are no extra
 ; state arguments to the statepoint
@@ -45,7 +47,9 @@ define i32 @test_i32_return() gc "statepoint-example" {
 ; CHECK-NEXT:    call return_i32
 ; CHECK-NEXT:  .Ltmp1:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i32 ()) @return_i32, i32 0, i32 0, i32 0, i32 0)
@@ -63,7 +67,9 @@ define ptr @test_i32ptr_return() gc "statepoint-example" {
 ; CHECK-NEXT:    call return_i32ptr
 ; CHECK-NEXT:  .Ltmp2:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(ptr ()) @return_i32ptr, i32 0, i32 0, i32 0, i32 0)
@@ -81,7 +87,9 @@ define float @test_float_return() gc "statepoint-example" {
 ; CHECK-NEXT:    call return_float
 ; CHECK-NEXT:  .Ltmp3:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(float ()) @return_float, i32 0, i32 0, i32 0, i32 0)
@@ -99,7 +107,9 @@ define %struct @test_struct_return() gc "statepoint-example" {
 ; CHECK-NEXT:    call return_struct
 ; CHECK-NEXT:  .Ltmp4:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(%struct ()) @return_struct, i32 0, i32 0, i32 0, i32 0)
@@ -118,7 +128,9 @@ define i1 @test_relocate(ptr addrspace(1) %a) gc "statepoint-example" {
 ; CHECK-NEXT:    call return_i1
 ; CHECK-NEXT:  .Ltmp5:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; Check that an unused relocate has no code-generation impact
 entry:
@@ -140,7 +152,9 @@ define void @test_void_vararg() gc "statepoint-example" {
 ; CHECK-NEXT:    call varargf
 ; CHECK-NEXT:  .Ltmp6:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; Check that a statepoint wrapping a ptr-returning vararg function works
 entry:
@@ -160,7 +174,9 @@ define i1 @test_i1_return_patchable() gc "statepoint-example" {
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:  .Ltmp7:
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; A patchable variant of test_i1_return
 entry:
@@ -197,7 +213,10 @@ define i1 @test_cross_bb(ptr addrspace(1) %a, i1 %external_cond) gc "statepoint-
 ; CHECK-NEXT:  .LBB8_3: # %right
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 32
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %a)]
@@ -237,7 +256,9 @@ define void @test_attributes(ptr byval(%struct2) %s) gc "statepoint-example" {
 ; CHECK-NEXT:    call consume_attributes
 ; CHECK-NEXT:  .Ltmp9:
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 32
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
 ; Check that arguments with attributes are lowered correctly.
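The statepoint tests make the payoff of the per-instruction directives concrete: a statepoint is precisely a place where the runtime may walk the stack, and a debugger can likewise stop between the ra reload and the final sp adjustment. Without the .cfi_restore, an unwinder stopped there would still look for ra in its stack slot even though ra already holds the correct value and the slot is about to be deallocated. Annotating what an unwinder computes at each point, with the offsets from the test_i1_return checks above:

  ld ra, 8(sp)             # rule so far: entry ra lives at CFA-8
  .cfi_restore ra          # from here: entry ra is in the register itself
  addi sp, sp, 16          # sp returns to its value at function entry
  .cfi_def_cfa_offset 0    # from here: CFA = sp + 0
  ret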
diff --git a/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll b/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll
index 225680e846bac7..6752bfe5798570 100644
--- a/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll
+++ b/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll
@@ -53,7 +53,15 @@ define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee_cfi(<vscale x 1 x
 ; OMIT-FP-NEXT:    slli a1, a0, 3
 ; OMIT-FP-NEXT:    sub a0, a1, a0
 ; OMIT-FP-NEXT:    add sp, sp, a0
+; OMIT-FP-NEXT:    .cfi_def_cfa sp, 16
+; OMIT-FP-NEXT:    .cfi_restore v1
+; OMIT-FP-NEXT:    .cfi_restore v2
+; OMIT-FP-NEXT:    .cfi_restore v4
+; OMIT-FP-NEXT:    .cfi_restore v1
+; OMIT-FP-NEXT:    .cfi_restore v2
+; OMIT-FP-NEXT:    .cfi_restore v4
 ; OMIT-FP-NEXT:    addi sp, sp, 16
+; OMIT-FP-NEXT:    .cfi_def_cfa_offset 0
 ; OMIT-FP-NEXT:    ret
 ;
 ; NO-OMIT-FP-LABEL: test_vector_callee_cfi:
@@ -111,10 +119,20 @@ define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee_cfi(<vscale x 1 x
 ; NO-OMIT-FP-NEXT:    sub a0, s0, a0
 ; NO-OMIT-FP-NEXT:    addi a0, a0, -32
 ; NO-OMIT-FP-NEXT:    vl4r.v v4, (a0) # Unknown-size Folded Reload
+; NO-OMIT-FP-NEXT:    .cfi_restore v1
+; NO-OMIT-FP-NEXT:    .cfi_restore v2
+; NO-OMIT-FP-NEXT:    .cfi_restore v4
 ; NO-OMIT-FP-NEXT:    addi sp, s0, -32
+; NO-OMIT-FP-NEXT:    .cfi_def_cfa sp, 32
 ; NO-OMIT-FP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; NO-OMIT-FP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; NO-OMIT-FP-NEXT:    .cfi_restore ra
+; NO-OMIT-FP-NEXT:    .cfi_restore s0
+; NO-OMIT-FP-NEXT:    .cfi_restore v1
+; NO-OMIT-FP-NEXT:    .cfi_restore v2
+; NO-OMIT-FP-NEXT:    .cfi_restore v4
 ; NO-OMIT-FP-NEXT:    addi sp, sp, 32
+; NO-OMIT-FP-NEXT:    .cfi_def_cfa_offset 0
 ; NO-OMIT-FP-NEXT:    ret
 entry:
   call void asm sideeffect "",
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index cd2208e31eb6d3..74693e655bf037 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -603,7 +603,9 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i64> %v
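Functions with a scalable stack region (sized in multiples of vlenb) release it first with an add computed from csrr vlenb; only after that add is the distance from sp to the CFA a compile-time constant again, which is why a .cfi_def_cfa sp, <fixed-size> is emitted there before the ordinary fixed-size epilogue. With the values from the CHECK lines above:

  csrr a0, vlenb           # a0 = VLEN/8, the scalable allocation unit
  slli a0, a0, 4           # this frame used 16 * vlenb bytes of RVV stack
  add sp, sp, a0           # release the scalable region
  .cfi_def_cfa sp, 16      # sp-to-CFA distance is a constant (16) again
  addi sp, sp, 16          # release the fixed region
  .cfi_def_cfa_offset 0
  ret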
diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
index 8640ac2da53030..c6a3649c9ba8fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -12,6 +12,7 @@ define <vscale x 1 x i64> @access_fixed_object(ptr %val) {
 ; RV64IV-NEXT:    ld a1, 520(sp)
 ; RV64IV-NEXT:    sd a1, 0(a0)
 ; RV64IV-NEXT:    addi sp, sp, 528
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local = alloca i64
   %array = alloca [64 x i64]
@@ -44,7 +45,9 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
 ; RV64IV-NEXT:    vadd.vv v8, v8, v9
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    add sp, sp, a0
+; RV64IV-NEXT:    .cfi_def_cfa sp, 528
 ; RV64IV-NEXT:    addi sp, sp, 528
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local = alloca i64
   %vector = alloca <vscale x 1 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
index 43fb0c10ca46f6..2694fe52de8a68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -47,9 +47,13 @@ body: |
     ; CHECK-NEXT: $x10 = ADDI killed $x10, -224
     ; CHECK-NEXT: VS1R_V killed renamable $v8, killed renamable $x10
     ; CHECK-NEXT: $x2 = frame-destroy ADDI $x8, -2032
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 2032
     ; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
     ; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NEXT: PseudoRET
     %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
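At the MIR level the same directives are CFI_INSTRUCTION pseudo-instructions carrying the frame-destroy flag, which is what this .mir test checks; each prints as the corresponding assembler directive once the machine register names ($x1 = ra, $x2 = sp, $x8 = s0) are shown with their ABI mnemonics. Pairing the CHECK lines above with their assembly form:

  frame-destroy CFI_INSTRUCTION def_cfa $x2, 2032   ; .cfi_def_cfa sp, 2032
  frame-destroy CFI_INSTRUCTION restore $x1         ; .cfi_restore ra
  frame-destroy CFI_INSTRUCTION restore $x8         ; .cfi_restore s0
  frame-destroy CFI_INSTRUCTION def_cfa_offset 0    ; .cfi_def_cfa_offset 0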
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
index 2e70c3395090ec..abe1920e437842 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
@@ -32,7 +32,9 @@ define void @test(ptr %addr) {
 ; CHECK-NEXT:    slli a1, a0, 1
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
   %ret = alloca %my_type, align 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index a9a680d54d5897..97903396679158 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -30,7 +30,9 @@ define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
 ; CHECK-NEXT:    csrrs a0, vlenb, zero
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
   %ret = alloca %struct.test, align 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll
index 82a3ac4a74d174..4cd1b045529e3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll
@@ -34,7 +34,9 @@ define target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @load_store_m1x5(targe
 ; CHECK-NEXT:    csrrs a0, vlenb, zero
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
   %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 1
@@ -67,7 +69,9 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @load_store_m2x2(targ
 ; CHECK-NEXT:    csrrs a0, vlenb, zero
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
   %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 1
@@ -100,7 +104,9 @@ define target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @load_store_m4x2(targ
 ; CHECK-NEXT:    csrrs a0, vlenb, zero
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
   %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/binop-splats.ll b/llvm/test/CodeGen/RISCV/rvv/binop-splats.ll
index f26e57b5a0b733..f588481533f6f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/binop-splats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/binop-splats.ll
@@ -406,6 +406,7 @@ define <vscale x 1 x i64> @nxv1i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: nxv1i64:
@@ -437,6 +438,7 @@ define <vscale x 2 x i64> @nxv2i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: nxv2i64:
@@ -468,6 +470,7 @@ define <vscale x 4 x i64> @nxv4i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: nxv4i64:
@@ -499,6 +502,7 @@ define <vscale x 8 x i64> @nxv8i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: nxv8i64:
@@ -591,6 +595,7 @@ define <vscale x 1 x i64> @uaddsatnxv1i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddsatnxv1i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
index 94e945f8032059..3ee060aaf24d24 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -769,6 +769,7 @@ define <vscale x 1 x i64> @bitreverse_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitreverse_nxv1i64:
@@ -909,6 +910,7 @@ define <vscale x 2 x i64> @bitreverse_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitreverse_nxv2i64:
@@ -1049,6 +1051,7 @@ define <vscale x 4 x i64> @bitreverse_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitreverse_nxv4i64:
@@ -1202,7 +1205,9 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitreverse_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 5709de567c18dc..b611e56f7b013b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -1502,6 +1502,7 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale
 ; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
 ; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv1i64:
@@ -1643,6 +1644,7 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv1i64_unmasked:
@@ -1786,6 +1788,7 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale
 ; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
 ; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv2i64:
@@ -1927,6 +1930,7 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv2i64_unmasked:
@@ -2070,6 +2074,7 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale
 ; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
 ; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv4i64:
@@ -2211,6 +2216,7 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv4i64_unmasked:
@@ -2399,7 +2405,9 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv7i64:
@@ -2476,7 +2484,9 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv7i64:
@@ -2571,7 +2581,9 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv7i64_unmasked:
@@ -2760,7 +2772,9 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv8i64:
@@ -2837,7 +2851,9 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i64:
@@ -2932,7 +2948,9 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_nxv8i64_unmasked:
@@ -3092,7 +3110,9 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16:
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
index d3fce3caf8aef9..eff7774304cb0d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -291,6 +291,7 @@ define <vscale x 1 x i64> @bswap_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v10, v8
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap_nxv1i64:
@@ -374,6 +375,7 @@ define <vscale x 2 x i64> @bswap_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v12, v8
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap_nxv2i64:
@@ -457,6 +459,7 @@ define <vscale x 4 x i64> @bswap_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v16, v8
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap_nxv4i64:
@@ -553,7 +556,9 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 6917d7e44a8e6a..6f4a6e72cb3e73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -548,6 +548,7 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; RV32-NEXT:    vor.vv v8, v8, v11, v0.t
 ; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv1i64:
@@ -632,6 +633,7 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; RV32-NEXT:    vor.vv v8, v8, v11
 ; RV32-NEXT:    vor.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv1i64_unmasked:
@@ -718,6 +720,7 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; RV32-NEXT:    vor.vv v8, v8, v14, v0.t
 ; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv2i64:
@@ -802,6 +805,7 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; RV32-NEXT:    vor.vv v8, v8, v14
 ; RV32-NEXT:    vor.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv2i64_unmasked:
@@ -888,6 +892,7 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; RV32-NEXT:    vor.vv v8, v8, v20, v0.t
 ; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv4i64:
@@ -972,6 +977,7 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; RV32-NEXT:    vor.vv v8, v8, v20
 ; RV32-NEXT:    vor.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv4i64_unmasked:
@@ -1103,7 +1109,9 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv7i64:
@@ -1153,7 +1161,9 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; CHECK-ZVKB-LABEL: vp_bswap_nxv7i64:
@@ -1217,7 +1227,9 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv7i64_unmasked:
@@ -1349,7 +1361,9 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv8i64:
@@ -1399,7 +1413,9 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; CHECK-ZVKB-LABEL: vp_bswap_nxv8i64:
@@ -1463,7 +1479,9 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv8i64_unmasked:
@@ -1560,7 +1578,9 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZVKB-LABEL: vp_bswap_nxv64i16:
@@ -1676,6 +1696,7 @@ define <vscale x 1 x i48> @vp_bswap_nxv1i48(<vscale x 1 x i48> %va, <vscale x 1
 ; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
 ; RV32-NEXT:    vsrl.vi v8, v8, 16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_nxv1i48:
diff --git a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
index c0b10be847d1ff..6c946b8c67ef7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
@@ -110,9 +110,13 @@ define riscv_vector_cc void @local_stack_allocation_frame_pointer() "frame-point
 ; SPILL-O2-NEXT:    lbu a0, -1912(s0)
 ; SPILL-O2-NEXT:    sb a0, -1912(s0)
 ; SPILL-O2-NEXT:    addi sp, s0, -2032
+; SPILL-O2-NEXT:    .cfi_def_cfa sp, 2032
 ; SPILL-O2-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; SPILL-O2-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; SPILL-O2-NEXT:    .cfi_restore ra
+; SPILL-O2-NEXT:    .cfi_restore s0
 ; SPILL-O2-NEXT:    addi sp, sp, 2032
+; SPILL-O2-NEXT:    .cfi_def_cfa_offset 0
 ; SPILL-O2-NEXT:    ret
   %va = alloca [2500 x i8], align 4
   %va_gep = getelementptr [2000 x i8], ptr %va, i64 0, i64 600
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 427ce9d097135b..41ff1133ff823c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -182,7 +182,9 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(ptr %x) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <vscale x 128 x i32>, ptr %x
   ret <vscale x 128 x i32> %v
@@ -269,7 +271,9 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_param_nxv32i32_nxv32i32_nxv32i32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %r = add <vscale x 32 x i32> %x, %y
   %s = add <vscale x 32 x i32> %r, %z
@@ -312,9 +316,13 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
 ; RV32-NEXT:    vmv8r.v v16, v24
 ; RV32-NEXT:    call ext2
 ; RV32-NEXT:    addi sp, s0, -144
+; RV32-NEXT:    .cfi_def_cfa sp, 144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_i32:
@@ -346,9 +354,13 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
 ; RV64-NEXT:    vmv8r.v v16, v24
 ; RV64-NEXT:    call ext2
 ; RV64-NEXT:    addi sp, s0, -144
+; RV64-NEXT:    .cfi_def_cfa sp, 144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 144
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %t = call fastcc <vscale x 32 x i32> @ext2(<vscale x 32 x i32> %y, <vscale x 32 x i32> %x, i32 %w, i32 2)
   ret <vscale x 32 x i32> %t
@@ -418,9 +430,13 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    call ext3
 ; RV32-NEXT:    addi sp, s0, -144
+; RV32-NEXT:    .cfi_def_cfa sp, 144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_i32:
@@ -486,9 +502,13 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    call ext3
 ; RV64-NEXT:    addi sp, s0, -144
+; RV64-NEXT:    .cfi_def_cfa sp, 144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 144
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %t = call fastcc <vscale x 32 x i32> @ext3(<vscale x 32 x i32> %z, <vscale x 32 x i32> %y, <vscale x 32 x i32> %x, i32 %w, i32 42)
   ret <vscale x 32 x i32> %t
@@ -567,10 +587,15 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
 ; RV32-NEXT:    call vector_arg_indirect_stack
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    addi sp, s0, -144
+; RV32-NEXT:    .cfi_def_cfa sp, 144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
 ; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: pass_vector_arg_indirect_stack:
@@ -625,10 +650,15 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
 ; RV64-NEXT:    call vector_arg_indirect_stack
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    addi sp, s0, -160
+; RV64-NEXT:    .cfi_def_cfa sp, 160
 ; RV64-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s1
 ; RV64-NEXT:    addi sp, sp, 160
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %s = call fastcc <vscale x 32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> zeroinitializer, i32 8)
   ret <vscale x 32 x i32> %s
@@ -696,10 +726,15 @@ define fastcc <vscale x 16 x i32> @pass_vector_arg_indirect_stack_no_gpr(<vscale
 ; RV32-NEXT:    call vector_arg_indirect_stack_no_gpr
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    .cfi_def_cfa sp, 80
 ; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 68(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
 ; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: pass_vector_arg_indirect_stack_no_gpr:
@@ -741,10 +776,15 @@ define fastcc <vscale x 16 x i32> @pass_vector_arg_indirect_stack_no_gpr(<vscale
 ; RV64-NEXT:    call vector_arg_indirect_stack_no_gpr
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    addi sp, s0, -96
+; RV64-NEXT:    .cfi_def_cfa sp, 96
 ; RV64-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 80(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s1
 ; RV64-NEXT:    addi sp, sp, 96
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %s = call fastcc <vscale x 16 x i32> @vector_arg_indirect_stack_no_gpr(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> zeroinitializer)
   ret <vscale x 16 x i32> %s
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index e03698eeb97151..d8a2975fa061eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -47,9 +47,13 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
 ; RV32-NEXT:    vmv.v.i v16, 0
 ; RV32-NEXT:    call callee_scalable_vector_split_indirect
 ; RV32-NEXT:    addi sp, s0, -144
+; RV32-NEXT:    .cfi_def_cfa sp, 144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: caller_scalable_vector_split_indirect:
@@ -78,9 +82,13 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
 ; RV64-NEXT:    vmv.v.i v16, 0
 ; RV64-NEXT:    call callee_scalable_vector_split_indirect
 ; RV64-NEXT:    addi sp, s0, -144
+; RV64-NEXT:    .cfi_def_cfa sp, 144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 144
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %c = alloca i64
   %a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
@@ -99,7 +107,9 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    vmv2r.v v10, v12
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: caller_tuple_return:
@@ -113,7 +123,9 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
 ; RV64-NEXT:    vmv2r.v v8, v10
 ; RV64-NEXT:    vmv2r.v v10, v12
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
   %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
@@ -137,7 +149,9 @@ define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x)
 ; RV32-NEXT:    vmv2r.v v10, v12
 ; RV32-NEXT:    call callee_tuple_argument
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: caller_tuple_argument:
@@ -151,7 +165,9 @@ define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x)
 ; RV64-NEXT:    vmv2r.v v10, v12
 ; RV64-NEXT:    call callee_tuple_argument
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
   %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index 15cff650765efa..86ef09d712e951 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -343,7 +343,9 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.ceil.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -411,7 +413,9 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16_unmasked(<vscale x 32 x bflo
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.ceil.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -930,7 +934,9 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1013,7 +1019,9 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1513,7 +1521,9 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index 52811133c53f3d..ee4be0cf865a2d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -241,7 +241,9 @@ define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-LABEL: test_compresstore_v256i8:
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 7031f93edc2c3e..093eb0ead313ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2220,7 +2220,9 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
 ; RV32-NEXT:    li a1, 56
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_nxv16i64:
@@ -2305,7 +2307,9 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i64:
@@ -2437,7 +2441,9 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_nxv16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index d36240e493e41d..8ebba3e1af7590 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2480,7 +2480,9 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
 ; RV32-NEXT:    li a1, 56
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_nxv16i64:
@@ -2571,7 +2573,9 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vp_cttz_nxv16i64:
@@ -2710,7 +2714,9 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_nxv16i64_unmasked:
@@ -4039,7 +4045,9 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vp_cttz_zero_undef_nxv16i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 1b9ce12af01f96..9e6a36d68833d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -151,6 +151,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x8, -2032
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa $x2, 2032
   ; CHECK-NEXT:   $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
   ; CHECK-NEXT:   $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
   ; CHECK-NEXT:   $x18 = LD $x2, 2008 :: (load (s64) from %stack.5)
@@ -163,7 +164,20 @@ body:             |
   ; CHECK-NEXT:   $x25 = LD $x2, 1952 :: (load (s64) from %stack.12)
   ; CHECK-NEXT:   $x26 = LD $x2, 1944 :: (load (s64) from %stack.13)
   ; CHECK-NEXT:   $x27 = LD $x2, 1936 :: (load (s64) from %stack.14)
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x1
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x8
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x18
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x19
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x20
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x21
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x22
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x23
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x24
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x25
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x26
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION restore $x27
   ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 2032
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 0
   ; CHECK-NEXT:   PseudoRET
   bb.0:
     successors: %bb.1, %bb.2
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 86ef78be97afb0..65f22370d729a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -1305,9 +1305,13 @@ define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    fld fa0, -8(a0)
 ; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    .cfi_def_cfa sp, 80
 ; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: extractelt_nxv16f64_neg1:
@@ -1342,9 +1346,13 @@ define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
 ; RV64-NEXT:    add a0, a0, a2
 ; RV64-NEXT:    fld fa0, 0(a0)
 ; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    .cfi_def_cfa sp, 80
 ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = extractelement <vscale x 16 x double> %v, i32 -1
   ret double %r
@@ -1392,9 +1400,13 @@ define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 zeroext %i
 ; RV32-NEXT:    vs8r.v v16, (a1)
 ; RV32-NEXT:    fld fa0, 0(a0)
 ; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    .cfi_def_cfa sp, 80
 ; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: extractelt_nxv16f64_idx:
@@ -1427,9 +1439,13 @@ define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 zeroext %i
 ; RV64-NEXT:    vs8r.v v16, (a1)
 ; RV64-NEXT:    fld fa0, 0(a0)
 ; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    .cfi_def_cfa sp, 80
 ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = extractelement <vscale x 16 x double> %v, i32 %idx
   ret double %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index 4d6bc349ffacbc..e6263ec9f00047 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -885,9 +885,13 @@ define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    lw a0, -4(a0)
 ; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i32> %v, i32 -1
   ret i32 %r
@@ -935,9 +939,13 @@ define i32 @extractelt_nxv32i32_idx(<vscale x 32 x i32> %v, i32 %idx) {
 ; CHECK-NEXT:    vs8r.v v16, (a1)
 ; CHECK-NEXT:    lw a0, 0(a0)
 ; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i32> %v, i32 %idx
   ret i32 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index aba0ad022005ba..d5c2b9e4842068 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -879,9 +879,13 @@ define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
 ; CHECK-NEXT:    add a0, a0, a2
 ; CHECK-NEXT:    ld a0, 0(a0)
 ; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i64> %v, i32 -1
   ret i64 %r
@@ -929,9 +933,13 @@ define i64 @extractelt_nxv16i64_idx(<vscale x 16 x i64> %v, i32 zeroext %idx) {
 ; CHECK-NEXT:    vs8r.v v16, (a1)
 ; CHECK-NEXT:    ld a0, 0(a0)
 ; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i64> %v, i32 %idx
   ret i64 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index f1cfb6748fd619..a4e7bb2f31048c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -44,9 +44,13 @@ define <512 x i8> @single_source(<512 x i8> %a) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    addi sp, s0, -1536
+; CHECK-NEXT:    .cfi_def_cfa sp, 1536
 ; CHECK-NEXT:    ld ra, 1528(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 1520(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 1536
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = shufflevector <512 x i8> %a, <512 x i8> poison, <512 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 
0, i32 0, i32 0, i32 500, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 258>
   ret <512 x i8> %res
@@ -155,9 +159,13 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, s0, -1536
+; CHECK-NEXT:    .cfi_def_cfa sp, 1536
 ; CHECK-NEXT:    ld ra, 1528(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 1520(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 1536
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = shufflevector <512 x i8> %a, <512 x i8> %b, <512 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 548, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 4, i32 574, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 473, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 674, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, 
i32 0, i32 0, i32 0, i32 500, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 555>
   ret <512 x i8> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll
index 8f40b02423094a..607dd0dd1b3f2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll
@@ -471,6 +471,7 @@ define <1 x i64> @v1i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: v1i64:
@@ -502,6 +503,7 @@ define <2 x i64> @v2i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: v2i64:
@@ -534,6 +536,7 @@ define <4 x i64> @v4i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: v4i64:
@@ -566,6 +569,7 @@ define <8 x i64> @v8i64(i64 %x, i64 %y) {
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: v8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index f124d550df16da..5a1a8e6519693b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -158,6 +158,7 @@ define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
 ; RV64ELEN32-NEXT:    vse8.v v8, (a0)
 ; RV64ELEN32-NEXT:    ld a0, 8(sp)
 ; RV64ELEN32-NEXT:    addi sp, sp, 16
+; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ELEN32-NEXT:    ret
   %b = bitcast <8 x i8> %a to i64
   ret i64 %b
@@ -196,6 +197,7 @@ define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
 ; RV64ELEN32-NEXT:    vse16.v v8, (a0)
 ; RV64ELEN32-NEXT:    ld a0, 8(sp)
 ; RV64ELEN32-NEXT:    addi sp, sp, 16
+; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ELEN32-NEXT:    ret
   %b = bitcast <4 x i16> %a to i64
   ret i64 %b
@@ -234,6 +236,7 @@ define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
 ; RV64ELEN32-NEXT:    vse32.v v8, (a0)
 ; RV64ELEN32-NEXT:    ld a0, 8(sp)
 ; RV64ELEN32-NEXT:    addi sp, sp, 16
+; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ELEN32-NEXT:    ret
   %b = bitcast <2 x i32> %a to i64
   ret i64 %b
@@ -358,6 +361,7 @@ define double @bitcast_v8i8_f64(<8 x i8> %a) {
 ; ELEN32-NEXT:    vse8.v v8, (a0)
 ; ELEN32-NEXT:    fld fa0, 8(sp)
 ; ELEN32-NEXT:    addi sp, sp, 16
+; ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; ELEN32-NEXT:    ret
   %b = bitcast <8 x i8> %a to double
   ret double %b
@@ -379,6 +383,7 @@ define double @bitcast_v4i16_f64(<4 x i16> %a) {
 ; ELEN32-NEXT:    vse16.v v8, (a0)
 ; ELEN32-NEXT:    fld fa0, 8(sp)
 ; ELEN32-NEXT:    addi sp, sp, 16
+; ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; ELEN32-NEXT:    ret
   %b = bitcast <4 x i16> %a to double
   ret double %b
@@ -400,6 +405,7 @@ define double @bitcast_v2i32_f64(<2 x i32> %a) {
 ; ELEN32-NEXT:    vse32.v v8, (a0)
 ; ELEN32-NEXT:    fld fa0, 8(sp)
 ; ELEN32-NEXT:    addi sp, sp, 16
+; ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; ELEN32-NEXT:    ret
   %b = bitcast <2 x i32> %a to double
   ret double %b
@@ -420,6 +426,7 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) {
 ; RV32ELEN32-NEXT:    sw a1, 12(sp)
 ; RV32ELEN32-NEXT:    fld fa0, 8(sp)
 ; RV32ELEN32-NEXT:    addi sp, sp, 16
+; RV32ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ELEN32-NEXT:    ret
 ;
 ; RV64ELEN32-LABEL: bitcast_v1i64_f64:
@@ -508,6 +515,7 @@ define <4 x i16> @bitcast_i64_v4i16(i64 %a) {
 ; RV64ELEN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64ELEN32-NEXT:    vle16.v v8, (a0)
 ; RV64ELEN32-NEXT:    addi sp, sp, 16
+; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ELEN32-NEXT:    ret
   %b = bitcast i64 %a to <4 x i16>
   ret <4 x i16> %b
@@ -543,6 +551,7 @@ define <2 x i32> @bitcast_i64_v2i32(i64 %a) {
 ; RV64ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV64ELEN32-NEXT:    vle32.v v8, (a0)
 ; RV64ELEN32-NEXT:    addi sp, sp, 16
+; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ELEN32-NEXT:    ret
   %b = bitcast i64 %a to <2 x i32>
   ret <2 x i32> %b
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 90bedf87e04d30..8fa70eda49a71a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -900,6 +900,7 @@ define <2 x i64> @vp_bitreverse_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %e
 ; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
 ; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v2i64:
@@ -1035,6 +1036,7 @@ define <2 x i64> @vp_bitreverse_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl)
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v2i64_unmasked:
@@ -1172,6 +1174,7 @@ define <4 x i64> @vp_bitreverse_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %e
 ; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
 ; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v4i64:
@@ -1307,6 +1310,7 @@ define <4 x i64> @vp_bitreverse_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl)
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v4i64_unmasked:
@@ -1444,6 +1448,7 @@ define <8 x i64> @vp_bitreverse_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %e
 ; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
 ; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v8i64:
@@ -1579,6 +1584,7 @@ define <8 x i64> @vp_bitreverse_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl)
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v8i64_unmasked:
@@ -1770,7 +1776,9 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v15i64:
@@ -1847,7 +1855,9 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
   ret <15 x i64> %v
@@ -1945,7 +1955,9 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v15i64_unmasked:
@@ -2137,7 +2149,9 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v16i64:
@@ -2214,7 +2228,9 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
@@ -2312,7 +2328,9 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bitreverse_v16i64_unmasked:
@@ -2463,7 +2481,9 @@ define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
   ret <128 x i16> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
index 7f211d0f8f9bad..9102f516122145 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
@@ -176,6 +176,7 @@ define void @bitreverse_v2i64(ptr %x, ptr %y) {
 ; RV32-NEXT:    vor.vv v8, v9, v8
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitreverse_v2i64:
@@ -427,6 +428,7 @@ define void @bitreverse_v4i64(ptr %x, ptr %y) {
 ; RV32-NEXT:    vor.vv v8, v10, v8
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitreverse_v4i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
index 6f2e86097d6ff9..1b2de91e0ab77d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
@@ -318,6 +318,7 @@ define <2 x i64> @vp_bswap_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-NEXT:    vor.vv v8, v8, v11, v0.t
 ; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v2i64:
@@ -396,6 +397,7 @@ define <2 x i64> @vp_bswap_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    vor.vv v8, v8, v11
 ; RV32-NEXT:    vor.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v2i64_unmasked:
@@ -476,6 +478,7 @@ define <4 x i64> @vp_bswap_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-NEXT:    vor.vv v8, v8, v14, v0.t
 ; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v4i64:
@@ -554,6 +557,7 @@ define <4 x i64> @vp_bswap_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    vor.vv v8, v8, v14
 ; RV32-NEXT:    vor.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v4i64_unmasked:
@@ -634,6 +638,7 @@ define <8 x i64> @vp_bswap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-NEXT:    vor.vv v8, v8, v20, v0.t
 ; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v8i64:
@@ -712,6 +717,7 @@ define <8 x i64> @vp_bswap_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    vor.vv v8, v8, v20
 ; RV32-NEXT:    vor.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v8i64_unmasked:
@@ -837,7 +843,9 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v15i64:
@@ -887,7 +895,9 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.bswap.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
   ret <15 x i64> %v
@@ -945,7 +955,9 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v15i64_unmasked:
@@ -1071,7 +1083,9 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v16i64:
@@ -1121,7 +1135,9 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
@@ -1179,7 +1195,9 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_bswap_v16i64_unmasked:
@@ -1267,7 +1285,9 @@ define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <128 x i16> @llvm.vp.bswap.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
   ret <128 x i16> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
index d5338f9b6c6fc0..364be063a570c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
@@ -104,6 +104,7 @@ define void @bswap_v2i64(ptr %x, ptr %y) {
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap_v2i64:
@@ -256,6 +257,7 @@ define void @bswap_v4i64(ptr %x, ptr %y) {
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap_v4i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index 9f48fdb3608a0b..5b0d4b55f06a9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -186,7 +186,9 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; CHECK-NEXT:    vmv8r.v v16, v24
 ; CHECK-NEXT:    call ext2
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %t = call fastcc <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2)
   ret <32 x i32> %t
@@ -214,9 +216,13 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    call ext3
 ; CHECK-NEXT:    addi sp, s0, -256
+; CHECK-NEXT:    .cfi_def_cfa sp, 256
 ; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 256
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %t = call fastcc <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
   ret <32 x i32> %t
@@ -268,9 +274,13 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    call vector_arg_indirect_stack
 ; CHECK-NEXT:    addi sp, s0, -256
+; CHECK-NEXT:    .cfi_def_cfa sp, 256
 ; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 256
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %s = call fastcc <32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
   ret <32 x i32> %s
@@ -326,7 +336,9 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    call vector_arg_direct_stack
 ; CHECK-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 160
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %s = call fastcc <32 x i32> @vector_arg_direct_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 1)
   ret <32 x i32> %s
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 809884cb18129b..fcdb5d5cb6aef7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -186,7 +186,9 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; CHECK-NEXT:    vmv8r.v v16, v24
 ; CHECK-NEXT:    call ext2
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %t = call <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2)
   ret <32 x i32> %t
@@ -214,9 +216,13 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    call ext3
 ; CHECK-NEXT:    addi sp, s0, -256
+; CHECK-NEXT:    .cfi_def_cfa sp, 256
 ; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 256
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %t = call <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
   ret <32 x i32> %t
@@ -269,9 +275,13 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
 ; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    call split_vector_args
 ; CHECK-NEXT:    addi sp, s0, -256
+; CHECK-NEXT:    .cfi_def_cfa sp, 256
 ; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 256
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <2 x i32>, ptr %pa
   %b = load <32 x i32>, ptr %pb
@@ -318,7 +328,9 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    call vector_arg_via_stack
 ; CHECK-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 144
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %s = call <32 x i32> @vector_arg_via_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
   ret <32 x i32> %s
@@ -373,7 +385,9 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; CHECK-NEXT:    vmv8r.v v16, v8
 ; CHECK-NEXT:    call vector_mask_arg_via_stack
 ; CHECK-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 160
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %r = call <4 x i1> @vector_mask_arg_via_stack(i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8, <4 x i1> %v, <4 x i1> %v)
   ret <4 x i1> %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index befbfb88550bad..84f1adea7645e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -791,7 +791,9 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
index f5e6b929051939..f76b32f6f327b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
@@ -1592,7 +1592,9 @@ define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_v15i64:
@@ -1708,6 +1710,7 @@ define <15 x i64> @vp_ctlz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_v15i64_unmasked:
@@ -1855,7 +1858,9 @@ define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_v16i64:
@@ -1971,6 +1976,7 @@ define <16 x i64> @vp_ctlz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_v16i64_unmasked:
@@ -2339,7 +2345,9 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    li a1, 56
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_v32i64:
@@ -2450,7 +2458,9 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
@@ -2582,7 +2592,9 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_v32i64_unmasked:
@@ -4228,7 +4240,9 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 z
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_zero_undef_v15i64:
@@ -4344,6 +4358,7 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_zero_undef_v15i64_unmasked:
@@ -4489,7 +4504,9 @@ define <16 x i64> @vp_ctlz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 z
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_zero_undef_v16i64:
@@ -4605,6 +4622,7 @@ define <16 x i64> @vp_ctlz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_zero_undef_v16i64_unmasked:
@@ -4971,7 +4989,9 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
 ; RV32-NEXT:    li a1, 56
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_zero_undef_v32i64:
@@ -5082,7 +5102,9 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
@@ -5214,7 +5236,9 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctlz_zero_undef_v32i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index e90e52fba642b1..2f4d49103e2e55 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1205,7 +1205,9 @@ define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_v15i64:
@@ -1292,6 +1294,7 @@ define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_v15i64_unmasked:
@@ -1422,7 +1425,9 @@ define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; RV32-NEXT:    li a1, 24
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_v16i64:
@@ -1509,6 +1514,7 @@ define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_v16i64_unmasked:
@@ -1756,7 +1762,9 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-NEXT:    li a1, 48
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_v32i64:
@@ -1840,7 +1848,9 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
@@ -1944,7 +1954,9 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_ctpop_v32i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
index dfad7881066a27..2f3d250f41e488 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
@@ -1342,7 +1342,9 @@ define <15 x i64> @vp_cttz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_v15i64:
@@ -1438,6 +1440,7 @@ define <15 x i64> @vp_cttz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_v15i64_unmasked:
@@ -1565,7 +1568,9 @@ define <16 x i64> @vp_cttz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_v16i64:
@@ -1661,6 +1666,7 @@ define <16 x i64> @vp_cttz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_v16i64_unmasked:
@@ -1999,7 +2005,9 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    li a1, 56
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_v32i64:
@@ -2090,7 +2098,9 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
@@ -2202,7 +2212,9 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_v32i64_unmasked:
@@ -3578,7 +3590,9 @@ define <15 x i64> @vp_cttz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 z
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_zero_undef_v15i64:
@@ -3674,6 +3688,7 @@ define <15 x i64> @vp_cttz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_zero_undef_v15i64_unmasked:
@@ -3799,7 +3814,9 @@ define <16 x i64> @vp_cttz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 z
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_zero_undef_v16i64:
@@ -3895,6 +3912,7 @@ define <16 x i64> @vp_cttz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
 ; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_zero_undef_v16i64_unmasked:
@@ -4231,7 +4249,9 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
 ; RV32-NEXT:    li a1, 56
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_zero_undef_v32i64:
@@ -4322,7 +4342,9 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
@@ -4434,7 +4456,9 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_cttz_zero_undef_v32i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
index 5bf8b07efc1daf..5bf167a77c5c7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
@@ -611,6 +611,7 @@ define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:  .LBB12_2: # %else
 ; RV32-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index c1b4c5fda6c640..f55f627a464baf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -791,7 +791,9 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index 51eb63f5f92212..fdd2da5dd1a761 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -576,7 +576,9 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <16 x double> @llvm.vp.maximum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
@@ -698,7 +700,9 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.maximum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
@@ -781,7 +785,9 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.maximum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index 03e0ac42c442c4..2ca156364f797b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -576,7 +576,9 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
@@ -698,7 +700,9 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
@@ -781,7 +785,9 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec-bf16.ll
index bdedc5f33c3a19..dd470407995615 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec-bf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec-bf16.ll
@@ -43,8 +43,11 @@ define <4 x bfloat> @splat_idx_v4bf16(<4 x bfloat> %v, i64 %idx) {
 ; RV32-ZFBFMIN-NEXT:    vmv.v.x v8, a0
 ; RV32-ZFBFMIN-NEXT:    csrr a0, vlenb
 ; RV32-ZFBFMIN-NEXT:    add sp, sp, a0
+; RV32-ZFBFMIN-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-ZFBFMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZFBFMIN-NEXT:    .cfi_restore ra
 ; RV32-ZFBFMIN-NEXT:    addi sp, sp, 48
+; RV32-ZFBFMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-ZFBFMIN-NEXT:    ret
 ;
 ; RV64-ZFBFMIN-LABEL: splat_idx_v4bf16:
@@ -71,8 +74,11 @@ define <4 x bfloat> @splat_idx_v4bf16(<4 x bfloat> %v, i64 %idx) {
 ; RV64-ZFBFMIN-NEXT:    vmv.v.x v8, a0
 ; RV64-ZFBFMIN-NEXT:    csrr a0, vlenb
 ; RV64-ZFBFMIN-NEXT:    add sp, sp, a0
+; RV64-ZFBFMIN-NEXT:    .cfi_def_cfa sp, 48
 ; RV64-ZFBFMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZFBFMIN-NEXT:    .cfi_restore ra
 ; RV64-ZFBFMIN-NEXT:    addi sp, sp, 48
+; RV64-ZFBFMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-ZFBFMIN-NEXT:    ret
   %x = extractelement <4 x bfloat> %v, i64 %idx
   %ins = insertelement <4 x bfloat> poison, bfloat %x, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 53059a4f28d42b..2c4eb185d52144 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -223,8 +223,11 @@ define <4 x half> @splat_idx_v4f16(<4 x half> %v, i64 %idx) {
 ; RV32-ZFHMIN-NEXT:    vmv.v.x v8, a0
 ; RV32-ZFHMIN-NEXT:    csrr a0, vlenb
 ; RV32-ZFHMIN-NEXT:    add sp, sp, a0
+; RV32-ZFHMIN-NEXT:    .cfi_def_cfa sp, 48
 ; RV32-ZFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZFHMIN-NEXT:    .cfi_restore ra
 ; RV32-ZFHMIN-NEXT:    addi sp, sp, 48
+; RV32-ZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-ZFHMIN-NEXT:    ret
 ;
 ; RV64-ZFHMIN-LABEL: splat_idx_v4f16:
@@ -251,8 +254,11 @@ define <4 x half> @splat_idx_v4f16(<4 x half> %v, i64 %idx) {
 ; RV64-ZFHMIN-NEXT:    vmv.v.x v8, a0
 ; RV64-ZFHMIN-NEXT:    csrr a0, vlenb
 ; RV64-ZFHMIN-NEXT:    add sp, sp, a0
+; RV64-ZFHMIN-NEXT:    .cfi_def_cfa sp, 48
 ; RV64-ZFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZFHMIN-NEXT:    .cfi_restore ra
 ; RV64-ZFHMIN-NEXT:    addi sp, sp, 48
+; RV64-ZFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-ZFHMIN-NEXT:    ret
   %x = extractelement <4 x half> %v, i64 %idx
   %ins = insertelement <4 x half> poison, half %x, i32 0
@@ -522,9 +528,13 @@ define <16 x float> @buildvec_v16f32(float %e0, float %e1, float %e2, float %e3,
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -128
+; RV32-NEXT:    .cfi_def_cfa sp, 128
 ; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: buildvec_v16f32:
@@ -566,9 +576,13 @@ define <16 x float> @buildvec_v16f32(float %e0, float %e1, float %e2, float %e3,
 ; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -128
+; RV64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v0 = insertelement <16 x float> poison, float %e0, i64 0
   %v1 = insertelement <16 x float> %v0, float %e1, i64 1
@@ -662,13 +676,21 @@ define <32 x float> @buildvec_v32f32(float %e0, float %e1, float %e2, float %e3,
 ; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a1)
 ; RV32-NEXT:    addi sp, s0, -256
+; RV32-NEXT:    .cfi_def_cfa sp, 256
 ; RV32-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    fld fs0, 240(sp) # 8-byte Folded Reload
 ; RV32-NEXT:    fld fs1, 232(sp) # 8-byte Folded Reload
 ; RV32-NEXT:    fld fs2, 224(sp) # 8-byte Folded Reload
 ; RV32-NEXT:    fld fs3, 216(sp) # 8-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore fs0
+; RV32-NEXT:    .cfi_restore fs1
+; RV32-NEXT:    .cfi_restore fs2
+; RV32-NEXT:    .cfi_restore fs3
 ; RV32-NEXT:    addi sp, sp, 256
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: buildvec_v32f32:
@@ -767,6 +789,7 @@ define <32 x float> @buildvec_v32f32(float %e0, float %e1, float %e2, float %e3,
 ; RV64-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; RV64-NEXT:    vle32.v v8, (a1)
 ; RV64-NEXT:    addi sp, s0, -256
+; RV64-NEXT:    .cfi_def_cfa sp, 256
 ; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs0, 232(sp) # 8-byte Folded Reload
@@ -781,7 +804,22 @@ define <32 x float> @buildvec_v32f32(float %e0, float %e1, float %e2, float %e3,
 ; RV64-NEXT:    fld fs9, 160(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs10, 152(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs11, 144(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore fs0
+; RV64-NEXT:    .cfi_restore fs1
+; RV64-NEXT:    .cfi_restore fs2
+; RV64-NEXT:    .cfi_restore fs3
+; RV64-NEXT:    .cfi_restore fs4
+; RV64-NEXT:    .cfi_restore fs5
+; RV64-NEXT:    .cfi_restore fs6
+; RV64-NEXT:    .cfi_restore fs7
+; RV64-NEXT:    .cfi_restore fs8
+; RV64-NEXT:    .cfi_restore fs9
+; RV64-NEXT:    .cfi_restore fs10
+; RV64-NEXT:    .cfi_restore fs11
 ; RV64-NEXT:    addi sp, sp, 256
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v0 = insertelement <32 x float> poison, float %e0, i64 0
   %v1 = insertelement <32 x float> %v0, float %e1, i64 1
@@ -842,9 +880,13 @@ define <8 x double> @buildvec_v8f64(double %e0, double %e1, double %e2, double %
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -128
+; RV32-NEXT:    .cfi_def_cfa sp, 128
 ; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: buildvec_v8f64:
@@ -870,9 +912,13 @@ define <8 x double> @buildvec_v8f64(double %e0, double %e1, double %e2, double %
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -128
+; RV64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v0 = insertelement <8 x double> poison, double %e0, i64 0
   %v1 = insertelement <8 x double> %v0, double %e1, i64 1
@@ -933,9 +979,13 @@ define <16 x double> @buildvec_v16f64(double %e0, double %e1, double %e2, double
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -384
+; RV32-NEXT:    .cfi_def_cfa sp, 384
 ; RV32-NEXT:    lw ra, 380(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 376(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 384
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: buildvec_v16f64:
@@ -969,9 +1019,13 @@ define <16 x double> @buildvec_v16f64(double %e0, double %e1, double %e2, double
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -256
+; RV64-NEXT:    .cfi_def_cfa sp, 256
 ; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 256
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v0 = insertelement <16 x double> poison, double %e0, i64 0
   %v1 = insertelement <16 x double> %v0, double %e1, i64 1
@@ -1098,6 +1152,7 @@ define <32 x double> @buildvec_v32f64(double %e0, double %e1, double %e2, double
 ; RV32-NEXT:    addi a0, sp, 256
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -512
+; RV32-NEXT:    .cfi_def_cfa sp, 512
 ; RV32-NEXT:    lw ra, 508(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 504(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    fld fs0, 496(sp) # 8-byte Folded Reload
@@ -1112,7 +1167,22 @@ define <32 x double> @buildvec_v32f64(double %e0, double %e1, double %e2, double
 ; RV32-NEXT:    fld fs9, 424(sp) # 8-byte Folded Reload
 ; RV32-NEXT:    fld fs10, 416(sp) # 8-byte Folded Reload
 ; RV32-NEXT:    fld fs11, 408(sp) # 8-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore fs0
+; RV32-NEXT:    .cfi_restore fs1
+; RV32-NEXT:    .cfi_restore fs2
+; RV32-NEXT:    .cfi_restore fs3
+; RV32-NEXT:    .cfi_restore fs4
+; RV32-NEXT:    .cfi_restore fs5
+; RV32-NEXT:    .cfi_restore fs6
+; RV32-NEXT:    .cfi_restore fs7
+; RV32-NEXT:    .cfi_restore fs8
+; RV32-NEXT:    .cfi_restore fs9
+; RV32-NEXT:    .cfi_restore fs10
+; RV32-NEXT:    .cfi_restore fs11
 ; RV32-NEXT:    addi sp, sp, 512
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: buildvec_v32f64:
@@ -1188,13 +1258,21 @@ define <32 x double> @buildvec_v32f64(double %e0, double %e1, double %e2, double
 ; RV64-NEXT:    mv a0, sp
 ; RV64-NEXT:    vle64.v v16, (a0)
 ; RV64-NEXT:    addi sp, s0, -384
+; RV64-NEXT:    .cfi_def_cfa sp, 384
 ; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs0, 360(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs1, 352(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs2, 344(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs3, 336(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore fs0
+; RV64-NEXT:    .cfi_restore fs1
+; RV64-NEXT:    .cfi_restore fs2
+; RV64-NEXT:    .cfi_restore fs3
 ; RV64-NEXT:    addi sp, sp, 384
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v0 = insertelement <32 x double> poison, double %e0, i64 0
   %v1 = insertelement <32 x double> %v0, double %e1, i64 1
@@ -1337,7 +1415,20 @@ define <32 x double> @buildvec_v32f64_exact_vlen(double %e0, double %e1, double
 ; RV32-NEXT:    fld fs9, 32(sp) # 8-byte Folded Reload
 ; RV32-NEXT:    fld fs10, 24(sp) # 8-byte Folded Reload
 ; RV32-NEXT:    fld fs11, 16(sp) # 8-byte Folded Reload
+; RV32-NEXT:    .cfi_restore fs0
+; RV32-NEXT:    .cfi_restore fs1
+; RV32-NEXT:    .cfi_restore fs2
+; RV32-NEXT:    .cfi_restore fs3
+; RV32-NEXT:    .cfi_restore fs4
+; RV32-NEXT:    .cfi_restore fs5
+; RV32-NEXT:    .cfi_restore fs6
+; RV32-NEXT:    .cfi_restore fs7
+; RV32-NEXT:    .cfi_restore fs8
+; RV32-NEXT:    .cfi_restore fs9
+; RV32-NEXT:    .cfi_restore fs10
+; RV32-NEXT:    .cfi_restore fs11
 ; RV32-NEXT:    addi sp, sp, 112
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: buildvec_v32f64_exact_vlen:
@@ -1421,7 +1512,16 @@ define <32 x double> @buildvec_v32f64_exact_vlen(double %e0, double %e1, double
 ; RV64-NEXT:    fld fs5, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs6, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    fld fs7, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore fs0
+; RV64-NEXT:    .cfi_restore fs1
+; RV64-NEXT:    .cfi_restore fs2
+; RV64-NEXT:    .cfi_restore fs3
+; RV64-NEXT:    .cfi_restore fs4
+; RV64-NEXT:    .cfi_restore fs5
+; RV64-NEXT:    .cfi_restore fs6
+; RV64-NEXT:    .cfi_restore fs7
 ; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v0 = insertelement <32 x double> poison, double %e0, i64 0
   %v1 = insertelement <32 x double> %v0, double %e1, i64 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index f3b124aa34dcb5..2780aa2d9747e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -275,7 +275,9 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
 ; V128-NEXT:    csrr a0, vlenb
 ; V128-NEXT:    slli a0, a0, 3
 ; V128-NEXT:    add sp, sp, a0
+; V128-NEXT:    .cfi_def_cfa sp, 16
 ; V128-NEXT:    addi sp, sp, 16
+; V128-NEXT:    .cfi_def_cfa_offset 0
 ; V128-NEXT:    ret
 ;
 ; V512-LABEL: interleave_v32f32:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
index a2ff77625b7584..86abfb771162fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
@@ -692,7 +692,9 @@ define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %res
@@ -727,7 +729,9 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index e81f686a283032..dae34fe9ce0ca8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -761,9 +761,13 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
 ; RV32VLA-NEXT:    vs8r.v v8, (a0)
 ; RV32VLA-NEXT:    vs8r.v v16, (a1)
 ; RV32VLA-NEXT:    addi sp, s0, -80
+; RV32VLA-NEXT:    .cfi_def_cfa sp, 80
 ; RV32VLA-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32VLA-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32VLA-NEXT:    .cfi_restore ra
+; RV32VLA-NEXT:    .cfi_restore s0
 ; RV32VLA-NEXT:    addi sp, sp, 80
+; RV32VLA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32VLA-NEXT:    ret
 ;
 ; RV64VLA-LABEL: insert_v2i64_nxv16i64_hi:
@@ -794,9 +798,13 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
 ; RV64VLA-NEXT:    vs8r.v v8, (a0)
 ; RV64VLA-NEXT:    vs8r.v v16, (a1)
 ; RV64VLA-NEXT:    addi sp, s0, -80
+; RV64VLA-NEXT:    .cfi_def_cfa sp, 80
 ; RV64VLA-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64VLA-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64VLA-NEXT:    .cfi_restore ra
+; RV64VLA-NEXT:    .cfi_restore s0
 ; RV64VLA-NEXT:    addi sp, sp, 80
+; RV64VLA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64VLA-NEXT:    ret
 ;
 ; RV32VLS-LABEL: insert_v2i64_nxv16i64_hi:
@@ -822,9 +830,13 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
 ; RV32VLS-NEXT:    vs8r.v v8, (a0)
 ; RV32VLS-NEXT:    vs8r.v v16, (a1)
 ; RV32VLS-NEXT:    addi sp, s0, -80
+; RV32VLS-NEXT:    .cfi_def_cfa sp, 80
 ; RV32VLS-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32VLS-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32VLS-NEXT:    .cfi_restore ra
+; RV32VLS-NEXT:    .cfi_restore s0
 ; RV32VLS-NEXT:    addi sp, sp, 80
+; RV32VLS-NEXT:    .cfi_def_cfa_offset 0
 ; RV32VLS-NEXT:    ret
 ;
 ; RV64VLS-LABEL: insert_v2i64_nxv16i64_hi:
@@ -850,9 +862,13 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
 ; RV64VLS-NEXT:    vs8r.v v8, (a0)
 ; RV64VLS-NEXT:    vs8r.v v16, (a1)
 ; RV64VLS-NEXT:    addi sp, s0, -80
+; RV64VLS-NEXT:    .cfi_def_cfa sp, 80
 ; RV64VLS-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64VLS-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64VLS-NEXT:    .cfi_restore ra
+; RV64VLS-NEXT:    .cfi_restore s0
 ; RV64VLS-NEXT:    addi sp, sp, 80
+; RV64VLS-NEXT:    .cfi_def_cfa_offset 0
 ; RV64VLS-NEXT:    ret
   %sv = load <2 x i64>, ptr %psv
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 8)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 81fb86cd81cd35..db340457aecb41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -132,9 +132,13 @@ define <64 x i32> @insertelt_v64i32_idx(<64 x i32> %a, i32 %y, i32 zeroext %idx)
 ; RV32-NEXT:    vle32.v v8, (a2)
 ; RV32-NEXT:    vle32.v v16, (a3)
 ; RV32-NEXT:    addi sp, s0, -384
+; RV32-NEXT:    .cfi_def_cfa sp, 384
 ; RV32-NEXT:    lw ra, 380(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 376(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 384
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: insertelt_v64i32_idx:
@@ -161,9 +165,13 @@ define <64 x i32> @insertelt_v64i32_idx(<64 x i32> %a, i32 %y, i32 zeroext %idx)
 ; RV64-NEXT:    vle32.v v8, (a2)
 ; RV64-NEXT:    vle32.v v16, (a3)
 ; RV64-NEXT:    addi sp, s0, -384
+; RV64-NEXT:    .cfi_def_cfa sp, 384
 ; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 384
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %b = insertelement <64 x i32> %a, i32 %y, i32 %idx
   ret <64 x i32> %b
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 43cee6610e7872..a8053089b9f916 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -1175,7 +1175,9 @@ define <8 x i64> @v8xi64_exact(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i
 ; RV32-NEXT:    vslide1down.vx v11, v11, t1
 ; RV32-NEXT:    vslide1down.vx v11, v11, t0
 ; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64V-LABEL: v8xi64_exact:
@@ -1384,7 +1386,9 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
 ; RV32-ONLY-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-ONLY-NEXT:    vslidedown.vi v8, v9, 8, v0.t
 ; RV32-ONLY-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-ONLY-NEXT:    .cfi_restore s0
 ; RV32-ONLY-NEXT:    addi sp, sp, 16
+; RV32-ONLY-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-ONLY-NEXT:    ret
 ;
 ; RV32VB-LABEL: buildvec_v16i8_loads_contigous:
@@ -1518,7 +1522,9 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
 ; RV64V-ONLY-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64V-ONLY-NEXT:    vslidedown.vi v8, v9, 8, v0.t
 ; RV64V-ONLY-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64V-ONLY-NEXT:    .cfi_restore s0
 ; RV64V-ONLY-NEXT:    addi sp, sp, 16
+; RV64V-ONLY-NEXT:    .cfi_def_cfa_offset 0
 ; RV64V-ONLY-NEXT:    ret
 ;
 ; RVA22U64-LABEL: buildvec_v16i8_loads_contigous:
@@ -1654,7 +1660,9 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
 ; RV64ZVE32-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64ZVE32-NEXT:    vslidedown.vi v8, v9, 8, v0.t
 ; RV64ZVE32-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64ZVE32-NEXT:    .cfi_restore s0
 ; RV64ZVE32-NEXT:    addi sp, sp, 16
+; RV64ZVE32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZVE32-NEXT:    ret
   %p2 = getelementptr i8, ptr %p, i32 1
   %p3 = getelementptr i8, ptr %p, i32 2
@@ -1755,7 +1763,9 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
 ; RV32-ONLY-NEXT:    vslide1down.vx v8, v8, t2
 ; RV32-ONLY-NEXT:    vslidedown.vi v8, v9, 8, v0.t
 ; RV32-ONLY-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-ONLY-NEXT:    .cfi_restore s0
 ; RV32-ONLY-NEXT:    addi sp, sp, 16
+; RV32-ONLY-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-ONLY-NEXT:    ret
 ;
 ; RV32VB-LABEL: buildvec_v16i8_loads_gather:
@@ -1889,7 +1899,9 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
 ; RV64V-ONLY-NEXT:    vslide1down.vx v8, v8, t2
 ; RV64V-ONLY-NEXT:    vslidedown.vi v8, v9, 8, v0.t
 ; RV64V-ONLY-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64V-ONLY-NEXT:    .cfi_restore s0
 ; RV64V-ONLY-NEXT:    addi sp, sp, 16
+; RV64V-ONLY-NEXT:    .cfi_def_cfa_offset 0
 ; RV64V-ONLY-NEXT:    ret
 ;
 ; RVA22U64-LABEL: buildvec_v16i8_loads_gather:
@@ -2025,7 +2037,9 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
 ; RV64ZVE32-NEXT:    vslide1down.vx v8, v8, t2
 ; RV64ZVE32-NEXT:    vslidedown.vi v8, v9, 8, v0.t
 ; RV64ZVE32-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64ZVE32-NEXT:    .cfi_restore s0
 ; RV64ZVE32-NEXT:    addi sp, sp, 16
+; RV64ZVE32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZVE32-NEXT:    ret
   %p2 = getelementptr i8, ptr %p, i32 1
   %p3 = getelementptr i8, ptr %p, i32 22
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
index e0c676788dcccf..6cab1bc2185287 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
@@ -528,9 +528,13 @@ define i32 @explode_16xi32(<16 x i32> %v) {
 ; RV32-NEXT:    add t2, t2, t5
 ; RV32-NEXT:    add a0, a0, t2
 ; RV32-NEXT:    addi sp, s0, -128
+; RV32-NEXT:    .cfi_def_cfa sp, 128
 ; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: explode_16xi32:
@@ -588,9 +592,13 @@ define i32 @explode_16xi32(<16 x i32> %v) {
 ; RV64-NEXT:    add t2, t2, t5
 ; RV64-NEXT:    addw a0, a0, t2
 ; RV64-NEXT:    addi sp, s0, -128
+; RV64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %e0 = extractelement <16 x i32> %v, i32 0
   %e1 = extractelement <16 x i32> %v, i32 1
@@ -803,9 +811,13 @@ define i64 @explode_8xi64(<8 x i64> %v) {
 ; RV64-NEXT:    add a0, a0, a3
 ; RV64-NEXT:    add a0, a0, a5
 ; RV64-NEXT:    addi sp, s0, -128
+; RV64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %e0 = extractelement <8 x i64> %v, i32 0
   %e1 = extractelement <8 x i64> %v, i32 1
@@ -989,7 +1001,20 @@ define i64 @explode_16xi64(<16 x i64> %v) {
 ; RV32-NEXT:    lw s9, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s10, 4(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s11, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
+; RV32-NEXT:    .cfi_restore s2
+; RV32-NEXT:    .cfi_restore s3
+; RV32-NEXT:    .cfi_restore s4
+; RV32-NEXT:    .cfi_restore s5
+; RV32-NEXT:    .cfi_restore s6
+; RV32-NEXT:    .cfi_restore s7
+; RV32-NEXT:    .cfi_restore s8
+; RV32-NEXT:    .cfi_restore s9
+; RV32-NEXT:    .cfi_restore s10
+; RV32-NEXT:    .cfi_restore s11
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: explode_16xi64:
@@ -1042,9 +1067,13 @@ define i64 @explode_16xi64(<16 x i64> %v) {
 ; RV64-NEXT:    add t4, t4, t5
 ; RV64-NEXT:    add a0, a0, t4
 ; RV64-NEXT:    addi sp, s0, -256
+; RV64-NEXT:    .cfi_def_cfa sp, 256
 ; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 256
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %e0 = extractelement <16 x i64> %v, i32 0
   %e1 = extractelement <16 x i64> %v, i32 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index 2ea90203b21030..c65e7aec712ae4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -440,7 +440,9 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; V128-NEXT:    csrr a0, vlenb
 ; V128-NEXT:    slli a0, a0, 3
 ; V128-NEXT:    add sp, sp, a0
+; V128-NEXT:    .cfi_def_cfa sp, 16
 ; V128-NEXT:    addi sp, sp, 16
+; V128-NEXT:    .cfi_def_cfa_offset 0
 ; V128-NEXT:    ret
 ;
 ; V512-LABEL: interleave_v32i32:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index 336a64b1b89ca8..a3a2102b0e70e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -53,6 +53,7 @@ define void @splat_v2i64(ptr %x, i64 %y) {
 ; RV32-NEXT:    vlse64.v v8, (a1), zero
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: splat_v2i64:
@@ -119,6 +120,7 @@ define void @splat_v4i64(ptr %x, i64 %y) {
 ; RV32-NEXT:    vlse64.v v8, (a1), zero
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: splat_v4i64:
@@ -412,6 +414,7 @@ define void @vadd_vx_v16i64(ptr %a, i64 %b, ptr %c) {
 ; RV32-NEXT:    vadd.vv v8, v8, v16
 ; RV32-NEXT:    vse64.v v8, (a3)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v16i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 70bda8c2da0f27..eb6e3b37eb30b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -5536,6 +5536,7 @@ define void @mulhu_vx_v2i64(ptr %x) {
 ; RV32-NEXT:    vsrl.vi v8, v8, 1
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mulhu_vx_v2i64:
@@ -5643,6 +5644,7 @@ define void @mulhs_vx_v2i64(ptr %x) {
 ; RV32-NEXT:    vadd.vv v8, v8, v9
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mulhs_vx_v2i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 5911e8248f2995..4b156192c637cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -633,7 +633,9 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    li a1, 84
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: load_factor6_too_big:
@@ -1068,7 +1070,9 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    slli a1, a0, 6
 ; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %interleaved.vec = load <48 x i64>, ptr %ptr
   %v0 = shufflevector <48 x i64> %interleaved.vec, <48 x i64> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index 805b371f1e3d56..47c9b07bd5aa08 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -20,7 +20,9 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v1i64_v1f32:
@@ -75,8 +77,11 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v2i64_v2f32:
@@ -171,8 +176,11 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v3i64_v3f32:
@@ -279,8 +287,11 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v4i64_v4f32:
@@ -394,9 +405,13 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -208
+; RV32-NEXT:    .cfi_def_cfa sp, 208
 ; RV32-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 200(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 208
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v8i64_v8f32:
@@ -447,9 +462,13 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -128
+; RV64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
   ret <8 x i64> %a
@@ -576,9 +595,13 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a1)
 ; RV32-NEXT:    addi sp, s0, -400
+; RV32-NEXT:    .cfi_def_cfa sp, 400
 ; RV32-NEXT:    lw ra, 396(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 392(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 400
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v16i64_v16f32:
@@ -656,9 +679,13 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -384
+; RV64-NEXT:    .cfi_def_cfa sp, 384
 ; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 384
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
@@ -681,7 +708,9 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v1i64_v1f64:
@@ -735,8 +764,11 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v2i64_v2f64:
@@ -828,8 +860,11 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 2
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v4i64_v4f64:
@@ -928,9 +963,13 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -272
+; RV32-NEXT:    .cfi_def_cfa sp, 272
 ; RV32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 272
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llrint_v8i64_v8f64:
@@ -980,9 +1019,13 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -192
+; RV64-NEXT:    .cfi_def_cfa sp, 192
 ; RV64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 192
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
index 43184a28ba3238..4f77ca01eb86dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
@@ -349,9 +349,13 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -128
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 128
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x)
   ret <8 x iXLen> %a
@@ -434,9 +438,13 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -192
+; RV32-NEXT:    .cfi_def_cfa sp, 192
 ; RV32-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 184(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 192
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lrint_v16f32:
@@ -514,9 +522,13 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
 ; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV64-i32-NEXT:    vle32.v v8, (a0)
 ; RV64-i32-NEXT:    addi sp, s0, -192
+; RV64-i32-NEXT:    .cfi_def_cfa sp, 192
 ; RV64-i32-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
 ; RV64-i32-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    .cfi_restore ra
+; RV64-i32-NEXT:    .cfi_restore s0
 ; RV64-i32-NEXT:    addi sp, sp, 192
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lrint_v16f32:
@@ -594,9 +606,13 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -384
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 384
 ; RV64-i64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 384
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
   ret <16 x iXLen> %a
@@ -797,9 +813,13 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    fcvt.w.d a0, fa2
 ; RV32-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, s0, -128
+; RV32-NEXT:    .cfi_def_cfa sp, 128
 ; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lrint_v8f64:
@@ -847,9 +867,13 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
 ; RV64-i32-NEXT:    fcvt.l.d a0, fa2
 ; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-i32-NEXT:    addi sp, s0, -128
+; RV64-i32-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    .cfi_restore ra
+; RV64-i32-NEXT:    .cfi_restore s0
 ; RV64-i32-NEXT:    addi sp, sp, 128
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lrint_v8f64:
@@ -899,9 +923,13 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -192
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 192
 ; RV64-i64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 192
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x)
   ret <8 x iXLen> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 9cd38056364494..2414ec5f0d56ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -4004,7 +4004,10 @@ define <8 x i64> @mgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i64> %passthr
 ; RV32ZVE32F-NEXT:    sw a1, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_v8i64:
@@ -4251,7 +4254,10 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8_v8i64:
@@ -4525,7 +4531,10 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i8_v8i64:
@@ -4801,7 +4810,10 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i8_v8i64:
@@ -5084,7 +5096,10 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_v8i16_v8i64:
@@ -5359,7 +5374,10 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8i64:
@@ -5636,7 +5654,10 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8i64:
@@ -5920,7 +5941,10 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_v8i32_v8i64:
@@ -6193,7 +6217,10 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i32_v8i64:
@@ -6467,7 +6494,10 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
 ; RV32ZVE32F-NEXT:    sw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i32_v8i64:
@@ -6765,7 +6795,10 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m,
 ; RV32ZVE32F-NEXT:    sw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_baseidx_v8i64:
@@ -12854,9 +12887,13 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
 ; RV64V-NEXT:    vslide1down.vx v8, v8, a7
 ; RV64V-NEXT:    vslidedown.vi v8, v9, 4, v0.t
 ; RV64V-NEXT:    addi sp, s0, -128
+; RV64V-NEXT:    .cfi_def_cfa sp, 128
 ; RV64V-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64V-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64V-NEXT:    .cfi_restore ra
+; RV64V-NEXT:    .cfi_restore s0
 ; RV64V-NEXT:    addi sp, sp, 128
+; RV64V-NEXT:    .cfi_def_cfa_offset 0
 ; RV64V-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_unaligned:
@@ -13738,6 +13775,7 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
 ; RV32ZVE32F-NEXT:    lw a1, 240(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    sw a1, 32(a0)
 ; RV32ZVE32F-NEXT:    addi sp, s0, -512
+; RV32ZVE32F-NEXT:    .cfi_def_cfa sp, 512
 ; RV32ZVE32F-NEXT:    lw ra, 508(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s0, 504(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 500(sp) # 4-byte Folded Reload
@@ -13750,7 +13788,20 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
 ; RV32ZVE32F-NEXT:    lw s9, 472(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s10, 468(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s11, 464(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore ra
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s2
+; RV32ZVE32F-NEXT:    .cfi_restore s3
+; RV32ZVE32F-NEXT:    .cfi_restore s4
+; RV32ZVE32F-NEXT:    .cfi_restore s5
+; RV32ZVE32F-NEXT:    .cfi_restore s6
+; RV32ZVE32F-NEXT:    .cfi_restore s7
+; RV32ZVE32F-NEXT:    .cfi_restore s8
+; RV32ZVE32F-NEXT:    .cfi_restore s9
+; RV32ZVE32F-NEXT:    .cfi_restore s10
+; RV32ZVE32F-NEXT:    .cfi_restore s11
 ; RV32ZVE32F-NEXT:    addi sp, sp, 512
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_split:
@@ -13870,7 +13921,21 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
 ; RV64ZVE32F-NEXT:    ld s9, 56(sp) # 8-byte Folded Reload
 ; RV64ZVE32F-NEXT:    ld s10, 48(sp) # 8-byte Folded Reload
 ; RV64ZVE32F-NEXT:    ld s11, 40(sp) # 8-byte Folded Reload
+; RV64ZVE32F-NEXT:    .cfi_restore ra
+; RV64ZVE32F-NEXT:    .cfi_restore s0
+; RV64ZVE32F-NEXT:    .cfi_restore s1
+; RV64ZVE32F-NEXT:    .cfi_restore s2
+; RV64ZVE32F-NEXT:    .cfi_restore s3
+; RV64ZVE32F-NEXT:    .cfi_restore s4
+; RV64ZVE32F-NEXT:    .cfi_restore s5
+; RV64ZVE32F-NEXT:    .cfi_restore s6
+; RV64ZVE32F-NEXT:    .cfi_restore s7
+; RV64ZVE32F-NEXT:    .cfi_restore s8
+; RV64ZVE32F-NEXT:    .cfi_restore s9
+; RV64ZVE32F-NEXT:    .cfi_restore s10
+; RV64ZVE32F-NEXT:    .cfi_restore s11
 ; RV64ZVE32F-NEXT:    addi sp, sp, 144
+; RV64ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZVE32F-NEXT:    ret
   %ptrs = getelementptr inbounds i64, ptr %base, <32 x i64> <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14, i64 16, i64 18, i64 20, i64 22, i64 24, i64 26, i64 28, i64 30, i64 32, i64 34, i64 36, i64 38, i64 40, i64 42, i64 44, i64 46, i64 48, i64 50, i64 52, i64 54, i64 56, i64 58, i64 60, i64 62>
   %x = call <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr> %ptrs, i32 8, <32 x i1> splat (i1 true), <32 x i64> poison)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index bc7758717c1c15..adf49375cba3ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -3205,7 +3205,11 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB41_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw s1, 4(a0)
@@ -3321,7 +3325,11 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
 ; RV64ZVE32F-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
 ; RV64ZVE32F-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
 ; RV64ZVE32F-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
+; RV64ZVE32F-NEXT:    .cfi_restore s0
+; RV64ZVE32F-NEXT:    .cfi_restore s1
+; RV64ZVE32F-NEXT:    .cfi_restore s2
 ; RV64ZVE32F-NEXT:    addi sp, sp, 32
+; RV64ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB41_10: # %cond.store
 ; RV64ZVE32F-NEXT:    ld a1, 0(a1)
@@ -3440,7 +3448,11 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB42_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -3684,7 +3696,11 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB43_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -3930,7 +3946,11 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB44_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -4183,7 +4203,11 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB45_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -4428,7 +4452,11 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB46_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -4675,7 +4703,11 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB47_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -4929,7 +4961,11 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB48_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -5172,7 +5208,11 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB49_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -5416,7 +5456,11 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
 ; RV32ZVE32F-NEXT:    addi sp, sp, 16
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB50_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -5702,7 +5746,17 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
 ; RV32ZVE32F-NEXT:    lw s6, 20(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s7, 16(sp) # 4-byte Folded Reload
 ; RV32ZVE32F-NEXT:    lw s8, 12(sp) # 4-byte Folded Reload
+; RV32ZVE32F-NEXT:    .cfi_restore s0
+; RV32ZVE32F-NEXT:    .cfi_restore s1
+; RV32ZVE32F-NEXT:    .cfi_restore s2
+; RV32ZVE32F-NEXT:    .cfi_restore s3
+; RV32ZVE32F-NEXT:    .cfi_restore s4
+; RV32ZVE32F-NEXT:    .cfi_restore s5
+; RV32ZVE32F-NEXT:    .cfi_restore s6
+; RV32ZVE32F-NEXT:    .cfi_restore s7
+; RV32ZVE32F-NEXT:    .cfi_restore s8
 ; RV32ZVE32F-NEXT:    addi sp, sp, 48
+; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB51_10: # %cond.store
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
@@ -5822,7 +5876,12 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
 ; RV64ZVE32F-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
 ; RV64ZVE32F-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
 ; RV64ZVE32F-NEXT:    ld s3, 0(sp) # 8-byte Folded Reload
+; RV64ZVE32F-NEXT:    .cfi_restore s0
+; RV64ZVE32F-NEXT:    .cfi_restore s1
+; RV64ZVE32F-NEXT:    .cfi_restore s2
+; RV64ZVE32F-NEXT:    .cfi_restore s3
 ; RV64ZVE32F-NEXT:    addi sp, sp, 32
+; RV64ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB51_10: # %cond.store
 ; RV64ZVE32F-NEXT:    ld a2, 0(a2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 4be680e272e5b9..ca17ea49a6f920 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1982,7 +1982,9 @@ define float @vreduce_fminimum_v64f32(ptr %x) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <64 x float>, ptr %x
   %red = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> %v)
@@ -2107,7 +2109,9 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <128 x float>, ptr %x
   %red = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> %v)
@@ -2319,7 +2323,9 @@ define double @vreduce_fminimum_v32f64(ptr %x) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <32 x double>, ptr %x
   %red = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> %v)
@@ -2442,7 +2448,9 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <64 x double>, ptr %x
   %red = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> %v)
@@ -2735,7 +2743,9 @@ define float @vreduce_fmaximum_v64f32(ptr %x) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <64 x float>, ptr %x
   %red = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> %v)
@@ -2860,7 +2870,9 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <128 x float>, ptr %x
   %red = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> %v)
@@ -3072,7 +3084,9 @@ define double @vreduce_fmaximum_v32f64(ptr %x) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <32 x double>, ptr %x
   %red = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> %v)
@@ -3195,7 +3209,9 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = load <64 x double>, ptr %x
   %red = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> %v)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 016f95bfef7e71..84063120cc9743 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -846,6 +846,7 @@ define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_add_v2i64:
@@ -880,6 +881,7 @@ define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umax_v2i64:
@@ -914,6 +916,7 @@ define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smax_v2i64:
@@ -948,6 +951,7 @@ define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umin_v2i64:
@@ -982,6 +986,7 @@ define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smin_v2i64:
@@ -1016,6 +1021,7 @@ define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_and_v2i64:
@@ -1050,6 +1056,7 @@ define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_or_v2i64:
@@ -1084,6 +1091,7 @@ define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_xor_v2i64:
@@ -1118,6 +1126,7 @@ define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_add_v4i64:
@@ -1152,6 +1161,7 @@ define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umax_v4i64:
@@ -1186,6 +1196,7 @@ define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smax_v4i64:
@@ -1220,6 +1231,7 @@ define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umin_v4i64:
@@ -1254,6 +1266,7 @@ define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smin_v4i64:
@@ -1288,6 +1301,7 @@ define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_and_v4i64:
@@ -1322,6 +1336,7 @@ define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_or_v4i64:
@@ -1356,6 +1371,7 @@ define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_xor_v4i64:
@@ -1391,7 +1407,9 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl)
 ; RV32-NEXT:    mv a1, a2
 ; RV32-NEXT:    call __mulsi3
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_mul_v1i8:
@@ -1412,7 +1430,9 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl)
 ; RV64-NEXT:    mv a1, a2
 ; RV64-NEXT:    call __muldi3
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = call i8 @llvm.vp.reduce.mul.v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 %evl)
   ret i8 %r
@@ -1443,7 +1463,9 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_mul_v2i8:
@@ -1468,7 +1490,9 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = call i8 @llvm.vp.reduce.mul.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
   ret i8 %r
@@ -1501,7 +1525,9 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_mul_v4i8:
@@ -1528,7 +1554,9 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = call i8 @llvm.vp.reduce.mul.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
   ret i8 %r
@@ -1563,7 +1591,9 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_mul_v8i8:
@@ -1592,7 +1622,9 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = call i8 @llvm.vp.reduce.mul.v8i8(i8 %s, <8 x i8> %v, <8 x i1> %m, i32 %evl)
   ret i8 %r
@@ -1629,7 +1661,9 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_mul_v16i8:
@@ -1660,7 +1694,9 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = call i8 @llvm.vp.reduce.mul.v16i8(i8 %s, <16 x i8> %v, <16 x i1> %m, i32 %evl)
   ret i8 %r
@@ -1700,7 +1736,9 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m,
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_mul_v32i8:
@@ -1734,7 +1772,9 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m,
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = call i8 @llvm.vp.reduce.mul.v32i8(i8 %s, <32 x i8> %v, <32 x i1> %m, i32 %evl)
   ret i8 %r
@@ -1784,7 +1824,9 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
 ; RV32-NEXT:    slli a0, a0, 24
 ; RV32-NEXT:    srai a0, a0, 24
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_mul_v64i8:
@@ -1828,7 +1870,9 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
 ; RV64-NEXT:    slli a0, a0, 56
 ; RV64-NEXT:    srai a0, a0, 56
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %r = call i8 @llvm.vp.reduce.mul.v64i8(i8 %s, <64 x i8> %v, <64 x i1> %m, i32 %evl)
   ret i8 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 56944e2aa5074d..a4a104abd2ef88 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -1581,7 +1581,9 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    slli a2, a2, 4
 ; RV32-NEXT:    add sp, sp, a2
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwreduce_add_v64i64:
@@ -1625,7 +1627,9 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    add sp, sp, a1
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = load <64 x i32>, ptr %x
   %e = sext <64 x i32> %v to <64 x i64>
@@ -1678,7 +1682,9 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    slli a2, a2, 4
 ; RV32-NEXT:    add sp, sp, a2
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwreduce_uadd_v64i64:
@@ -1722,7 +1728,9 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    add sp, sp, a1
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %v = load <64 x i32>, ptr %x
   %e = zext <64 x i32> %v to <64 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 1f856d04ca89fd..f5d7e1af887bbe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -566,7 +566,9 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 0f587232680df6..30f516eff10daa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -791,7 +791,9 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 0fb7e6a7de5696..3920bd432d1219 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -791,7 +791,9 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 927f96b6442274..e3b7bf0467d5cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -791,7 +791,9 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
index 8e2a225622eec2..8239789d6bfd7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
@@ -1107,7 +1107,9 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
 ; ZVFH-NEXT:    csrr a0, vlenb
 ; ZVFH-NEXT:    slli a0, a0, 4
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN32-LABEL: fcmp_oeq_vv_v128f16:
@@ -1653,9 +1655,13 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
 ; ZVFHMIN32-NEXT:    vand.vi v8, v8, 1
 ; ZVFHMIN32-NEXT:    vmsne.vi v0, v8, 0
 ; ZVFHMIN32-NEXT:    addi sp, s0, -768
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 768
 ; ZVFHMIN32-NEXT:    lw ra, 764(sp) # 4-byte Folded Reload
 ; ZVFHMIN32-NEXT:    lw s0, 760(sp) # 4-byte Folded Reload
+; ZVFHMIN32-NEXT:    .cfi_restore ra
+; ZVFHMIN32-NEXT:    .cfi_restore s0
 ; ZVFHMIN32-NEXT:    addi sp, sp, 768
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN32-NEXT:    ret
 ;
 ; ZVFHMIN64-LABEL: fcmp_oeq_vv_v128f16:
@@ -2201,9 +2207,13 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
 ; ZVFHMIN64-NEXT:    vand.vi v8, v8, 1
 ; ZVFHMIN64-NEXT:    vmsne.vi v0, v8, 0
 ; ZVFHMIN64-NEXT:    addi sp, s0, -768
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 768
 ; ZVFHMIN64-NEXT:    ld ra, 760(sp) # 8-byte Folded Reload
 ; ZVFHMIN64-NEXT:    ld s0, 752(sp) # 8-byte Folded Reload
+; ZVFHMIN64-NEXT:    .cfi_restore ra
+; ZVFHMIN64-NEXT:    .cfi_restore s0
 ; ZVFHMIN64-NEXT:    addi sp, sp, 768
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN64-NEXT:    ret
   %v = call <128 x i1> @llvm.vp.fcmp.v128f16(<128 x half> %va, <128 x half> %vb, metadata !"oeq", <128 x i1> %m, i32 %evl)
   ret <128 x i1> %v
@@ -2814,7 +2824,9 @@ define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x i1> @llvm.vp.fcmp.v32f64(<32 x double> %va, <32 x double> %vb, metadata !"oeq", <32 x i1> %m, i32 %evl)
   ret <32 x i1> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index 5f3847e085055b..5cf4c6efa761a1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -637,7 +637,9 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <256 x i1> @llvm.vp.icmp.v256i8(<256 x i8> %va, <256 x i8> %vb, metadata !"eq", <256 x i1> %m, i32 %evl)
   ret <256 x i1> %v
@@ -1292,7 +1294,9 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <64 x i1> @llvm.vp.icmp.v64i32(<64 x i32> %va, <64 x i32> %vb, metadata !"eq", <64 x i1> %m, i32 %evl)
   ret <64 x i1> %v
@@ -1385,6 +1389,7 @@ define <8 x i1> @icmp_eq_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vmseq.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_vx_v8i64:
@@ -1413,6 +1418,7 @@ define <8 x i1> @icmp_eq_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 z
 ; RV32-NEXT:    vmseq.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_vx_swap_v8i64:
@@ -1474,6 +1480,7 @@ define <8 x i1> @icmp_ne_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vmsne.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_vx_v8i64:
@@ -1502,6 +1509,7 @@ define <8 x i1> @icmp_ne_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 z
 ; RV32-NEXT:    vmsne.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_vx_swap_v8i64:
@@ -1563,6 +1571,7 @@ define <8 x i1> @icmp_ugt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vmsltu.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_vx_v8i64:
@@ -1591,6 +1600,7 @@ define <8 x i1> @icmp_ugt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
 ; RV32-NEXT:    vmsltu.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_vx_swap_v8i64:
@@ -1652,6 +1662,7 @@ define <8 x i1> @icmp_uge_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vmsleu.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_vx_v8i64:
@@ -1682,6 +1693,7 @@ define <8 x i1> @icmp_uge_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
 ; RV32-NEXT:    vmsleu.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_vx_swap_v8i64:
@@ -1743,6 +1755,7 @@ define <8 x i1> @icmp_ult_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vmsltu.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_vx_v8i64:
@@ -1771,6 +1784,7 @@ define <8 x i1> @icmp_ult_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
 ; RV32-NEXT:    vmsltu.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_vx_swap_v8i64:
@@ -1832,6 +1846,7 @@ define <8 x i1> @icmp_sgt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vmslt.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_vx_v8i64:
@@ -1860,6 +1875,7 @@ define <8 x i1> @icmp_sgt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
 ; RV32-NEXT:    vmslt.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_vx_swap_v8i64:
@@ -1921,6 +1937,7 @@ define <8 x i1> @icmp_sge_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vmsle.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_vx_v8i64:
@@ -1951,6 +1968,7 @@ define <8 x i1> @icmp_sge_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
 ; RV32-NEXT:    vmsle.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_vx_swap_v8i64:
@@ -2012,6 +2030,7 @@ define <8 x i1> @icmp_slt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vmslt.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_vx_v8i64:
@@ -2040,6 +2059,7 @@ define <8 x i1> @icmp_slt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
 ; RV32-NEXT:    vmslt.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_vx_swap_v8i64:
@@ -2101,6 +2121,7 @@ define <8 x i1> @icmp_sle_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vmsle.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_vx_v8i64:
@@ -2129,6 +2150,7 @@ define <8 x i1> @icmp_sle_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
 ; RV32-NEXT:    vmsle.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_vx_swap_v8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1down.ll
index 0d8a9692442353..1169d19f93d7cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1down.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1down.ll
@@ -94,6 +94,7 @@ define <2 x i64> @vslide1down_2xi64(<2 x i64> %v, i64 %b) {
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vslide1down_2xi64:
@@ -119,6 +120,7 @@ define <4 x i64> @vslide1down_4xi64(<4 x i64> %v, i64 %b) {
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vslideup.vi v8, v10, 3
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vslide1down_4xi64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
index d1fb30c7daa3ea..500bd4a0ec977d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
@@ -101,6 +101,7 @@ define <2 x i64> @vslide1up_2xi64(<2 x i64> %v, i64 %b) {
 ; RV32-NEXT:    vslideup.vi v9, v8, 1
 ; RV32-NEXT:    vmv.v.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vslide1up_2xi64:
@@ -127,6 +128,7 @@ define <4 x i64> @vslide1up_4xi64(<4 x i64> %v, i64 %b) {
 ; RV32-NEXT:    vslideup.vi v10, v8, 1
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vslide1up_4xi64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 7513d31b54bd1a..ad55f276a74c8a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -507,7 +507,9 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 6
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64> %a, <128 x i1> %m, i32 %vl)
   ret <128 x i32> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
index ea7f6beb22a7cb..3dd7121eb3d2c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
@@ -193,6 +193,7 @@ define <8 x i64> @vaaddu_vx_v8i64_floor(<8 x i64> %x, i64 %y) {
 ; RV32-NEXT:    csrwi vxrm, 2
 ; RV32-NEXT:    vaaddu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vaaddu_vx_v8i64_floor:
@@ -436,6 +437,7 @@ define <8 x i64> @vaaddu_vx_v8i64_ceil(<8 x i64> %x, i64 %y) {
 ; RV32-NEXT:    csrwi vxrm, 0
 ; RV32-NEXT:    vaaddu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vaaddu_vx_v8i64_ceil:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 6246ef7db0cb33..66f6673439c6ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -1000,6 +1000,7 @@ define <2 x i64> @vadd_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v2i64:
@@ -1026,6 +1027,7 @@ define <2 x i64> @vadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v2i64_unmasked:
@@ -1094,6 +1096,7 @@ define <4 x i64> @vadd_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v4i64:
@@ -1120,6 +1123,7 @@ define <4 x i64> @vadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v4i64_unmasked:
@@ -1188,6 +1192,7 @@ define <8 x i64> @vadd_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v8i64:
@@ -1214,6 +1219,7 @@ define <8 x i64> @vadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v8i64_unmasked:
@@ -1282,6 +1288,7 @@ define <16 x i64> @vadd_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v16i64:
@@ -1308,6 +1315,7 @@ define <16 x i64> @vadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
index c413dd86f37128..6cc878974862f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
@@ -867,6 +867,7 @@ define <2 x i64> @vand_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v2i64:
@@ -893,6 +894,7 @@ define <2 x i64> @vand_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v2i64_unmasked:
@@ -961,6 +963,7 @@ define <4 x i64> @vand_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v4i64:
@@ -987,6 +990,7 @@ define <4 x i64> @vand_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v4i64_unmasked:
@@ -1055,6 +1059,7 @@ define <8 x i64> @vand_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v8i64:
@@ -1081,6 +1086,7 @@ define <8 x i64> @vand_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v8i64_unmasked:
@@ -1149,6 +1155,7 @@ define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v11i64:
@@ -1175,6 +1182,7 @@ define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v11i64_unmasked:
@@ -1243,6 +1251,7 @@ define <16 x i64> @vand_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v16i64:
@@ -1269,6 +1278,7 @@ define <16 x i64> @vand_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
index 77a095303675f5..bc27e040b2998a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -339,7 +339,9 @@ define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
index e626727ffb8b4c..a0048f0146c16b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
@@ -619,6 +619,7 @@ define <2 x i64> @vdiv_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v2i64:
@@ -645,6 +646,7 @@ define <2 x i64> @vdiv_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v2i64_unmasked:
@@ -693,6 +695,7 @@ define <4 x i64> @vdiv_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v4i64:
@@ -719,6 +722,7 @@ define <4 x i64> @vdiv_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v4i64_unmasked:
@@ -767,6 +771,7 @@ define <8 x i64> @vdiv_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v8i64:
@@ -793,6 +798,7 @@ define <8 x i64> @vdiv_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v8i64_unmasked:
@@ -841,6 +847,7 @@ define <16 x i64> @vdiv_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v16i64:
@@ -867,6 +874,7 @@ define <16 x i64> @vdiv_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
index 3715449ef27f06..b0d1d2b42b0df3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
@@ -618,6 +618,7 @@ define <2 x i64> @vdivu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v2i64:
@@ -644,6 +645,7 @@ define <2 x i64> @vdivu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v2i64_unmasked:
@@ -692,6 +694,7 @@ define <4 x i64> @vdivu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v4i64:
@@ -718,6 +721,7 @@ define <4 x i64> @vdivu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v4i64_unmasked:
@@ -766,6 +770,7 @@ define <8 x i64> @vdivu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v8i64:
@@ -792,6 +797,7 @@ define <8 x i64> @vdivu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v8i64_unmasked:
@@ -840,6 +846,7 @@ define <16 x i64> @vdivu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v16i64:
@@ -866,6 +873,7 @@ define <16 x i64> @vdivu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
index 6dcebc9763d82b..f03f1ec639eb65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -889,7 +889,9 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
@@ -956,7 +958,9 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
index c83a298cb501ee..58acb32f287462 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -431,7 +431,9 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.maxnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
index 60dbededb90a5c..0e148cb8f5404f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -431,7 +431,9 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
index 6c695b43d2718e..76a238f847260f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
@@ -677,7 +677,9 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
@@ -744,7 +746,9 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
index afea1dc6d3c2a3..22ded979aa2039 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -122,7 +122,9 @@ define <64 x float> @vfwadd_v64f16(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, ptr %x
   %b = load <64 x half>, ptr %y
@@ -232,7 +234,9 @@ define <32 x double> @vfwadd_v32f32(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, ptr %x
   %b = load <32 x float>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmaccbf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmaccbf16.ll
index b953cf1f5bed88..9fc232d412df19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmaccbf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmaccbf16.ll
@@ -20,6 +20,7 @@ define <1 x float> @vfwmaccbf16_vv_v1f32(<1 x float> %a, <1 x bfloat> %b, <1 x b
 ; ZVFBFWMA-NEXT:    vle32.v v10, (a0)
 ; ZVFBFWMA-NEXT:    vfmacc.vv v8, v9, v10
 ; ZVFBFWMA-NEXT:    addi sp, sp, 16
+; ZVFBFWMA-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBMIN32-LABEL: vfwmaccbf16_vv_v1f32:
@@ -58,10 +59,15 @@ define <1 x float> @vfwmaccbf16_vv_v1f32(<1 x float> %a, <1 x bfloat> %b, <1 x b
 ; ZVFBMIN32-NEXT:    vfmacc.vv v8, v10, v9
 ; ZVFBMIN32-NEXT:    csrr a0, vlenb
 ; ZVFBMIN32-NEXT:    add sp, sp, a0
+; ZVFBMIN32-NEXT:    .cfi_def_cfa sp, 32
 ; ZVFBMIN32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; ZVFBMIN32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; ZVFBMIN32-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; ZVFBMIN32-NEXT:    .cfi_restore ra
+; ZVFBMIN32-NEXT:    .cfi_restore s0
+; ZVFBMIN32-NEXT:    .cfi_restore fs0
 ; ZVFBMIN32-NEXT:    addi sp, sp, 32
+; ZVFBMIN32-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFBMIN32-NEXT:    ret
 ;
 ; ZVFBMIN64-LABEL: vfwmaccbf16_vv_v1f32:
@@ -102,10 +108,15 @@ define <1 x float> @vfwmaccbf16_vv_v1f32(<1 x float> %a, <1 x bfloat> %b, <1 x b
 ; ZVFBMIN64-NEXT:    vfmacc.vv v8, v10, v9
 ; ZVFBMIN64-NEXT:    csrr a0, vlenb
 ; ZVFBMIN64-NEXT:    add sp, sp, a0
+; ZVFBMIN64-NEXT:    .cfi_def_cfa sp, 64
 ; ZVFBMIN64-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; ZVFBMIN64-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; ZVFBMIN64-NEXT:    fld fs0, 40(sp) # 8-byte Folded Reload
+; ZVFBMIN64-NEXT:    .cfi_restore ra
+; ZVFBMIN64-NEXT:    .cfi_restore s0
+; ZVFBMIN64-NEXT:    .cfi_restore fs0
 ; ZVFBMIN64-NEXT:    addi sp, sp, 64
+; ZVFBMIN64-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFBMIN64-NEXT:    ret
   %b.ext = fpext <1 x bfloat> %b to <1 x float>
   %c.ext = fpext <1 x bfloat> %c to <1 x float>
@@ -129,6 +140,7 @@ define <1 x float> @vfwmaccbf16_vf_v1f32(<1 x float> %a, bfloat %b, <1 x bfloat>
 ; ZVFBFWMA-NEXT:    vle32.v v10, (a0)
 ; ZVFBFWMA-NEXT:    vfmacc.vv v8, v9, v10
 ; ZVFBFWMA-NEXT:    addi sp, sp, 16
+; ZVFBFWMA-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBMIN32-LABEL: vfwmaccbf16_vf_v1f32:
@@ -163,9 +175,13 @@ define <1 x float> @vfwmaccbf16_vf_v1f32(<1 x float> %a, bfloat %b, <1 x bfloat>
 ; ZVFBMIN32-NEXT:    vfmacc.vv v8, v10, v9
 ; ZVFBMIN32-NEXT:    csrr a0, vlenb
 ; ZVFBMIN32-NEXT:    add sp, sp, a0
+; ZVFBMIN32-NEXT:    .cfi_def_cfa sp, 48
 ; ZVFBMIN32-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; ZVFBMIN32-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
+; ZVFBMIN32-NEXT:    .cfi_restore ra
+; ZVFBMIN32-NEXT:    .cfi_restore fs0
 ; ZVFBMIN32-NEXT:    addi sp, sp, 48
+; ZVFBMIN32-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFBMIN32-NEXT:    ret
 ;
 ; ZVFBMIN64-LABEL: vfwmaccbf16_vf_v1f32:
@@ -202,9 +218,13 @@ define <1 x float> @vfwmaccbf16_vf_v1f32(<1 x float> %a, bfloat %b, <1 x bfloat>
 ; ZVFBMIN64-NEXT:    vfmacc.vv v8, v10, v9
 ; ZVFBMIN64-NEXT:    csrr a0, vlenb
 ; ZVFBMIN64-NEXT:    add sp, sp, a0
+; ZVFBMIN64-NEXT:    .cfi_def_cfa sp, 48
 ; ZVFBMIN64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; ZVFBMIN64-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
+; ZVFBMIN64-NEXT:    .cfi_restore ra
+; ZVFBMIN64-NEXT:    .cfi_restore fs0
 ; ZVFBMIN64-NEXT:    addi sp, sp, 48
+; ZVFBMIN64-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFBMIN64-NEXT:    ret
   %b.head = insertelement <1 x bfloat> poison, bfloat %b, i32 0
   %b.splat = shufflevector <1 x bfloat> %b.head, <1 x bfloat> poison, <1 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
index 319994d2655651..551c4462d1de88 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -122,7 +122,9 @@ define <64 x float> @vfwmul_v64f16(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, ptr %x
   %b = load <64 x half>, ptr %y
@@ -232,7 +234,9 @@ define <32 x double> @vfwmul_v32f32(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, ptr %x
   %b = load <32 x float>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
index 2c706cad9742ff..2246e9dae28bd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -122,7 +122,9 @@ define <64 x float> @vfwsub_v64f16(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, ptr %x
   %b = load <64 x half>, ptr %y
@@ -232,7 +234,9 @@ define <32 x double> @vfwsub_v32f32(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, ptr %x
   %b = load <32 x float>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
index 3dec7daf66ac9c..6508b430dbd06e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
@@ -422,6 +422,7 @@ define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsitofp_v1i7_v1f16:
@@ -436,6 +437,7 @@ define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret <1 x half> %evec
@@ -454,6 +456,7 @@ define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret <1 x half> %evec
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll
index 4805d6782a3b93..eef57e25cb00dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll
@@ -1384,6 +1384,7 @@ define <2 x i64> @vmacc_vx_nxv2i64(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x i1>
 ; RV32-NEXT:    vmacc.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv2i64:
@@ -1414,6 +1415,7 @@ define <2 x i64> @vmacc_vx_nxv2i64_unmasked(<2 x i64> %a, i64 %b, <2 x i64> %c,
 ; RV32-NEXT:    vmacc.vv v9, v8, v10
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv2i64_unmasked:
@@ -1457,6 +1459,7 @@ define <2 x i64> @vmacc_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x
 ; RV32-NEXT:    vmacc.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv2i64_ta:
@@ -1518,6 +1521,7 @@ define <4 x i64> @vmacc_vx_nxv4i64(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x i1>
 ; RV32-NEXT:    vmacc.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv4i64:
@@ -1548,6 +1552,7 @@ define <4 x i64> @vmacc_vx_nxv4i64_unmasked(<4 x i64> %a, i64 %b, <4 x i64> %c,
 ; RV32-NEXT:    vmacc.vv v10, v8, v12
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv4i64_unmasked:
@@ -1591,6 +1596,7 @@ define <4 x i64> @vmacc_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x
 ; RV32-NEXT:    vmacc.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv4i64_ta:
@@ -1652,6 +1658,7 @@ define <8 x i64> @vmacc_vx_nxv8i64(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x i1>
 ; RV32-NEXT:    vmacc.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv8i64:
@@ -1682,6 +1689,7 @@ define <8 x i64> @vmacc_vx_nxv8i64_unmasked(<8 x i64> %a, i64 %b, <8 x i64> %c,
 ; RV32-NEXT:    vmacc.vv v12, v8, v16
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv8i64_unmasked:
@@ -1725,6 +1733,7 @@ define <8 x i64> @vmacc_vx_nxv8i64_ta(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x
 ; RV32-NEXT:    vmacc.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv8i64_ta:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index 6adc6ba9621a8f..5dc3e3cbd5c4b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -752,6 +752,7 @@ define <2 x i64> @vmax_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v2i64:
@@ -778,6 +779,7 @@ define <2 x i64> @vmax_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v2i64_unmasked:
@@ -826,6 +828,7 @@ define <4 x i64> @vmax_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v4i64:
@@ -852,6 +855,7 @@ define <4 x i64> @vmax_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v4i64_unmasked:
@@ -900,6 +904,7 @@ define <8 x i64> @vmax_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v8i64:
@@ -926,6 +931,7 @@ define <8 x i64> @vmax_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v8i64_unmasked:
@@ -974,6 +980,7 @@ define <16 x i64> @vmax_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v16i64:
@@ -1000,6 +1007,7 @@ define <16 x i64> @vmax_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index baeb372c017e2e..78422cf284b27a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -751,6 +751,7 @@ define <2 x i64> @vmaxu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v2i64:
@@ -777,6 +778,7 @@ define <2 x i64> @vmaxu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v2i64_unmasked:
@@ -825,6 +827,7 @@ define <4 x i64> @vmaxu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v4i64:
@@ -851,6 +854,7 @@ define <4 x i64> @vmaxu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v4i64_unmasked:
@@ -899,6 +903,7 @@ define <8 x i64> @vmaxu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v8i64:
@@ -925,6 +930,7 @@ define <8 x i64> @vmaxu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v8i64_unmasked:
@@ -973,6 +979,7 @@ define <16 x i64> @vmaxu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v16i64:
@@ -999,6 +1006,7 @@ define <16 x i64> @vmaxu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index d0c21ce05c0254..116e39aa2ce370 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -752,6 +752,7 @@ define <2 x i64> @vmin_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v2i64:
@@ -778,6 +779,7 @@ define <2 x i64> @vmin_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v2i64_unmasked:
@@ -826,6 +828,7 @@ define <4 x i64> @vmin_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v4i64:
@@ -852,6 +855,7 @@ define <4 x i64> @vmin_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v4i64_unmasked:
@@ -900,6 +904,7 @@ define <8 x i64> @vmin_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v8i64:
@@ -926,6 +931,7 @@ define <8 x i64> @vmin_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v8i64_unmasked:
@@ -974,6 +980,7 @@ define <16 x i64> @vmin_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v16i64:
@@ -1000,6 +1007,7 @@ define <16 x i64> @vmin_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index a730ba4729d252..b2226f4661e00c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -751,6 +751,7 @@ define <2 x i64> @vminu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v2i64:
@@ -777,6 +778,7 @@ define <2 x i64> @vminu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v2i64_unmasked:
@@ -825,6 +827,7 @@ define <4 x i64> @vminu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v4i64:
@@ -851,6 +854,7 @@ define <4 x i64> @vminu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v4i64_unmasked:
@@ -899,6 +903,7 @@ define <8 x i64> @vminu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v8i64:
@@ -925,6 +930,7 @@ define <8 x i64> @vminu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v8i64_unmasked:
@@ -973,6 +979,7 @@ define <16 x i64> @vminu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v16i64:
@@ -999,6 +1006,7 @@ define <16 x i64> @vminu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
index 8970fbf740d235..e8e25f08d8ff40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
@@ -661,6 +661,7 @@ define <2 x i64> @vmul_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v2i64:
@@ -687,6 +688,7 @@ define <2 x i64> @vmul_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v2i64_unmasked:
@@ -735,6 +737,7 @@ define <4 x i64> @vmul_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v4i64:
@@ -761,6 +764,7 @@ define <4 x i64> @vmul_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v4i64_unmasked:
@@ -809,6 +813,7 @@ define <8 x i64> @vmul_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v8i64:
@@ -835,6 +840,7 @@ define <8 x i64> @vmul_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v8i64_unmasked:
@@ -883,6 +889,7 @@ define <16 x i64> @vmul_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v16i64:
@@ -909,6 +916,7 @@ define <16 x i64> @vmul_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll
index 805e2e2e6bd35d..7854cd91600d53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll
@@ -1384,6 +1384,7 @@ define <2 x i64> @vnmsac_vx_nxv2i64(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x i1
 ; RV32-NEXT:    vnmsac.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv2i64:
@@ -1414,6 +1415,7 @@ define <2 x i64> @vnmsac_vx_nxv2i64_unmasked(<2 x i64> %a, i64 %b, <2 x i64> %c,
 ; RV32-NEXT:    vnmsac.vv v9, v8, v10
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv2i64_unmasked:
@@ -1457,6 +1459,7 @@ define <2 x i64> @vnmsac_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x
 ; RV32-NEXT:    vnmsac.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv2i64_ta:
@@ -1518,6 +1521,7 @@ define <4 x i64> @vnmsac_vx_nxv4i64(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x i1
 ; RV32-NEXT:    vnmsac.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv4i64:
@@ -1548,6 +1552,7 @@ define <4 x i64> @vnmsac_vx_nxv4i64_unmasked(<4 x i64> %a, i64 %b, <4 x i64> %c,
 ; RV32-NEXT:    vnmsac.vv v10, v8, v12
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv4i64_unmasked:
@@ -1591,6 +1596,7 @@ define <4 x i64> @vnmsac_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x
 ; RV32-NEXT:    vnmsac.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv4i64_ta:
@@ -1652,6 +1658,7 @@ define <8 x i64> @vnmsac_vx_nxv8i64(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x i1
 ; RV32-NEXT:    vnmsac.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv8i64:
@@ -1682,6 +1689,7 @@ define <8 x i64> @vnmsac_vx_nxv8i64_unmasked(<8 x i64> %a, i64 %b, <8 x i64> %c,
 ; RV32-NEXT:    vnmsac.vv v12, v8, v16
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv8i64_unmasked:
@@ -1725,6 +1733,7 @@ define <8 x i64> @vnmsac_vx_nxv8i64_ta(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x
 ; RV32-NEXT:    vnmsac.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv8i64_ta:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
index 09c281b525a643..0a83b6d25ef3bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
@@ -921,6 +921,7 @@ define <2 x i64> @vor_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v2i64:
@@ -947,6 +948,7 @@ define <2 x i64> @vor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl)
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v2i64_unmasked:
@@ -1015,6 +1017,7 @@ define <4 x i64> @vor_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v4i64:
@@ -1041,6 +1044,7 @@ define <4 x i64> @vor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl)
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v4i64_unmasked:
@@ -1109,6 +1113,7 @@ define <8 x i64> @vor_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v8i64:
@@ -1135,6 +1140,7 @@ define <8 x i64> @vor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl)
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v8i64_unmasked:
@@ -1203,6 +1209,7 @@ define <16 x i64> @vor_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v16i64:
@@ -1229,6 +1236,7 @@ define <16 x i64> @vor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %e
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
index 2913cbdf0fffd2..86846242cbc16f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
@@ -193,6 +193,7 @@ define <1 x i64> @vp_splat_v1i64(i64 %val, <1 x i1> %m, i32 zeroext %evl) {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_v1i64:
@@ -215,6 +216,7 @@ define <2 x i64> @vp_splat_v2i64(i64 %val, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_v2i64:
@@ -237,6 +239,7 @@ define <4 x i64> @vp_splat_v4i64(i64 %val, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_v4i64:
@@ -259,6 +262,7 @@ define <8 x i64> @vp_splat_v8i64(i64 %val, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_v8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index bdf76dc63ddd85..93cb1653480894 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -574,6 +574,7 @@ define <2 x i64> @vpmerge_vx_v2i64(i64 %a, <2 x i64> %vb, <2 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_v2i64:
@@ -594,6 +595,7 @@ define <2 x i64> @vpmerge_vx_v2i64(i64 %a, <2 x i64> %vb, <2 x i1> %m, i32 zeroe
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
 ; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vpmerge_vx_v2i64:
@@ -643,6 +645,7 @@ define <4 x i64> @vpmerge_vx_v4i64(i64 %a, <4 x i64> %vb, <4 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_v4i64:
@@ -663,6 +666,7 @@ define <4 x i64> @vpmerge_vx_v4i64(i64 %a, <4 x i64> %vb, <4 x i1> %m, i32 zeroe
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
 ; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vpmerge_vx_v4i64:
@@ -712,6 +716,7 @@ define <8 x i64> @vpmerge_vx_v8i64(i64 %a, <8 x i64> %vb, <8 x i1> %m, i32 zeroe
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_v8i64:
@@ -732,6 +737,7 @@ define <8 x i64> @vpmerge_vx_v8i64(i64 %a, <8 x i64> %vb, <8 x i1> %m, i32 zeroe
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
 ; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vpmerge_vx_v8i64:
@@ -781,6 +787,7 @@ define <16 x i64> @vpmerge_vx_v16i64(i64 %a, <16 x i64> %vb, <16 x i1> %m, i32 z
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_v16i64:
@@ -801,6 +808,7 @@ define <16 x i64> @vpmerge_vx_v16i64(i64 %a, <16 x i64> %vb, <16 x i1> %m, i32 z
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, tu, ma
 ; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vpmerge_vx_v16i64:
@@ -1209,7 +1217,9 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x double> @llvm.vp.merge.v32f64(<32 x i1> %m, <32 x double> %va, <32 x double> %vb, i32 %evl)
   ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index c0550398761915..ad6a6cfe113c71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -1728,7 +1728,9 @@ define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   call void @llvm.vp.scatter.v32f64.v32p0(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m, i32 %evl)
   ret void
@@ -1805,7 +1807,9 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %ptrs = getelementptr inbounds double, ptr %base, <32 x i32> %idxs
   call void @llvm.vp.scatter.v32f64.v32p0(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m, i32 %evl)
@@ -1891,7 +1895,9 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
 ; RV64-NEXT:    slli a1, a0, 3
 ; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %eidxs = sext <32 x i32> %idxs to <32 x i64>
   %ptrs = getelementptr inbounds double, ptr %base, <32 x i64> %eidxs
@@ -1978,7 +1984,9 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
 ; RV64-NEXT:    slli a1, a0, 3
 ; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %eidxs = zext <32 x i32> %idxs to <32 x i64>
   %ptrs = getelementptr inbounds double, ptr %base, <32 x i64> %eidxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
index aa76324f3804f1..87f97411dd402d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
@@ -619,6 +619,7 @@ define <2 x i64> @vrem_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v2i64:
@@ -645,6 +646,7 @@ define <2 x i64> @vrem_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v2i64_unmasked:
@@ -693,6 +695,7 @@ define <4 x i64> @vrem_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v4i64:
@@ -719,6 +722,7 @@ define <4 x i64> @vrem_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v4i64_unmasked:
@@ -767,6 +771,7 @@ define <8 x i64> @vrem_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v8i64:
@@ -793,6 +798,7 @@ define <8 x i64> @vrem_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v8i64_unmasked:
@@ -841,6 +847,7 @@ define <16 x i64> @vrem_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v16i64:
@@ -867,6 +874,7 @@ define <16 x i64> @vrem_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
index 24fa9357f91660..51b9fb85b2c993 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -618,6 +618,7 @@ define <2 x i64> @vremu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v2i64:
@@ -644,6 +645,7 @@ define <2 x i64> @vremu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v2i64_unmasked:
@@ -692,6 +694,7 @@ define <4 x i64> @vremu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v4i64:
@@ -718,6 +721,7 @@ define <4 x i64> @vremu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v4i64_unmasked:
@@ -766,6 +770,7 @@ define <8 x i64> @vremu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v8i64:
@@ -792,6 +797,7 @@ define <8 x i64> @vremu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v8i64_unmasked:
@@ -840,6 +846,7 @@ define <16 x i64> @vremu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v16i64:
@@ -866,6 +873,7 @@ define <16 x i64> @vremu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll
index 563482b88e8bdb..7dac83f0554c63 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll
@@ -571,6 +571,7 @@ define <2 x i64> @vrsub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v2i64:
@@ -597,6 +598,7 @@ define <2 x i64> @vrsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v2i64_unmasked:
@@ -645,6 +647,7 @@ define <4 x i64> @vrsub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v10, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v4i64:
@@ -671,6 +674,7 @@ define <4 x i64> @vrsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v4i64_unmasked:
@@ -719,6 +723,7 @@ define <8 x i64> @vrsub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v12, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v8i64:
@@ -745,6 +750,7 @@ define <8 x i64> @vrsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v8i64_unmasked:
@@ -793,6 +799,7 @@ define <16 x i64> @vrsub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v16, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v16i64:
@@ -819,6 +826,7 @@ define <16 x i64> @vrsub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 5030fda9dea331..036d3513df53e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -1013,6 +1013,7 @@ define <2 x i64> @vsadd_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v2i64:
@@ -1039,6 +1040,7 @@ define <2 x i64> @vsadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v2i64_unmasked:
@@ -1107,6 +1109,7 @@ define <4 x i64> @vsadd_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v4i64:
@@ -1133,6 +1136,7 @@ define <4 x i64> @vsadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v4i64_unmasked:
@@ -1201,6 +1205,7 @@ define <8 x i64> @vsadd_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v8i64:
@@ -1227,6 +1232,7 @@ define <8 x i64> @vsadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v8i64_unmasked:
@@ -1295,6 +1301,7 @@ define <16 x i64> @vsadd_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v16i64:
@@ -1321,6 +1328,7 @@ define <16 x i64> @vsadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
index 741699289e0271..acdc2845400c5b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
@@ -448,6 +448,7 @@ define <2 x i64> @sadd_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_v2i64_vx:
@@ -495,6 +496,7 @@ define <4 x i64> @sadd_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_v4i64_vx:
@@ -542,6 +544,7 @@ define <8 x i64> @sadd_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_v8i64_vx:
@@ -589,6 +592,7 @@ define <16 x i64> @sadd_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_v16i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index 562399ea33e7a8..ab05990135f085 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -1009,6 +1009,7 @@ define <2 x i64> @vsaddu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v2i64:
@@ -1035,6 +1036,7 @@ define <2 x i64> @vsaddu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %e
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v2i64_unmasked:
@@ -1103,6 +1105,7 @@ define <4 x i64> @vsaddu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v4i64:
@@ -1129,6 +1132,7 @@ define <4 x i64> @vsaddu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %e
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v4i64_unmasked:
@@ -1197,6 +1201,7 @@ define <8 x i64> @vsaddu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v8i64:
@@ -1223,6 +1228,7 @@ define <8 x i64> @vsaddu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %e
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v8i64_unmasked:
@@ -1291,6 +1297,7 @@ define <16 x i64> @vsaddu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 ze
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v16i64:
@@ -1317,6 +1324,7 @@ define <16 x i64> @vsaddu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
index 7b2cab294aa496..5e682662ef9d05 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
@@ -448,6 +448,7 @@ define <2 x i64> @uadd_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_v2i64_vx:
@@ -495,6 +496,7 @@ define <4 x i64> @uadd_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_v4i64_vx:
@@ -542,6 +544,7 @@ define <8 x i64> @uadd_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_v8i64_vx:
@@ -589,6 +592,7 @@ define <16 x i64> @uadd_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_v16i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
index 4f533f2055bf34..dc83edba5ae8cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
@@ -107,7 +107,9 @@ define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
 ; CHECK-NEXT:    li a1, 48
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %c = add <512 x i8> %a, %b
   ret <512 x i8> %c
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index 0a2ed3eb1ffbf7..7b7242c16c261c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -201,7 +201,9 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 %evl)
   ret <256 x i8> %v
@@ -259,7 +261,9 @@ define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 129)
   ret <256 x i8> %v
@@ -447,7 +451,9 @@ define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 %evl)
   ret <32 x i64> %v
@@ -490,7 +496,9 @@ define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c)
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 17)
   ret <32 x i64> %v
@@ -630,7 +638,9 @@ define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <64 x float> @llvm.vp.select.v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 %evl)
   ret <64 x float> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 549c6ca11e320e..d557734f0dbe73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -1045,6 +1045,7 @@ define <2 x i64> @vssub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v2i64:
@@ -1071,6 +1072,7 @@ define <2 x i64> @vssub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v2i64_unmasked:
@@ -1141,6 +1143,7 @@ define <4 x i64> @vssub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v4i64:
@@ -1167,6 +1170,7 @@ define <4 x i64> @vssub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v4i64_unmasked:
@@ -1237,6 +1241,7 @@ define <8 x i64> @vssub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v8i64:
@@ -1263,6 +1268,7 @@ define <8 x i64> @vssub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v8i64_unmasked:
@@ -1333,6 +1339,7 @@ define <16 x i64> @vssub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v16i64:
@@ -1359,6 +1366,7 @@ define <16 x i64> @vssub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
index efe28eb9021ce2..e10c6480634d74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
@@ -448,6 +448,7 @@ define <2 x i64> @ssub_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_v2i64_vx:
@@ -496,6 +497,7 @@ define <4 x i64> @ssub_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_v4i64_vx:
@@ -544,6 +546,7 @@ define <8 x i64> @ssub_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_v8i64_vx:
@@ -592,6 +595,7 @@ define <16 x i64> @ssub_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_v16i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 683f1150310b39..33b9c28468393e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -1040,6 +1040,7 @@ define <2 x i64> @vssubu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v2i64:
@@ -1066,6 +1067,7 @@ define <2 x i64> @vssubu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %e
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v2i64_unmasked:
@@ -1136,6 +1138,7 @@ define <4 x i64> @vssubu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v4i64:
@@ -1162,6 +1165,7 @@ define <4 x i64> @vssubu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %e
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v4i64_unmasked:
@@ -1232,6 +1236,7 @@ define <8 x i64> @vssubu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroex
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v8i64:
@@ -1258,6 +1263,7 @@ define <8 x i64> @vssubu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %e
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v8i64_unmasked:
@@ -1328,6 +1334,7 @@ define <16 x i64> @vssubu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 ze
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v16i64:
@@ -1354,6 +1361,7 @@ define <16 x i64> @vssubu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
index dc9279f6e7fa09..42e51bfcb52ef1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
@@ -448,6 +448,7 @@ define <2 x i64> @usub_v2i64_vx(<2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_v2i64_vx:
@@ -496,6 +497,7 @@ define <4 x i64> @usub_v4i64_vx(<4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_v4i64_vx:
@@ -544,6 +546,7 @@ define <8 x i64> @usub_v8i64_vx(<8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_v8i64_vx:
@@ -592,6 +595,7 @@ define <16 x i64> @usub_v16i64_vx(<16 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_v16i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
index 6052c9ee20fe10..6c8c21b83aab36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
@@ -649,6 +649,7 @@ define <2 x i64> @vsub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v2i64:
@@ -675,6 +676,7 @@ define <2 x i64> @vsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v2i64_unmasked:
@@ -723,6 +725,7 @@ define <4 x i64> @vsub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v4i64:
@@ -749,6 +752,7 @@ define <4 x i64> @vsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v4i64_unmasked:
@@ -797,6 +801,7 @@ define <8 x i64> @vsub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v8i64:
@@ -823,6 +828,7 @@ define <8 x i64> @vsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v8i64_unmasked:
@@ -871,6 +877,7 @@ define <16 x i64> @vsub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v16i64:
@@ -897,6 +904,7 @@ define <16 x i64> @vsub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index 97c7f101c25827..51757281237053 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -306,7 +306,9 @@ define <128 x i16> @vwmul_v128i16(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, ptr %x
   %b = load <128 x i8>, ptr %y
@@ -353,7 +355,9 @@ define <64 x i32> @vwmul_v64i32(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <64 x i16>, ptr %x
   %b = load <64 x i16>, ptr %y
@@ -399,7 +403,9 @@ define <32 x i64> @vwmul_v32i64(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <32 x i32>, ptr %x
   %b = load <32 x i32>, ptr %y
@@ -864,6 +870,7 @@ define <2 x i64> @vwmul_vx_v2i64_i64(ptr %x, ptr %y) {
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index c73b3a0dce6be4..cc69617ac8047b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -298,7 +298,9 @@ define <128 x i16> @vwmulsu_v128i16(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, ptr %x
   %b = load <128 x i8>, ptr %y
@@ -345,7 +347,9 @@ define <64 x i32> @vwmulsu_v64i32(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <64 x i16>, ptr %x
   %b = load <64 x i16>, ptr %y
@@ -391,7 +395,9 @@ define <32 x i64> @vwmulsu_v32i64(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <32 x i32>, ptr %x
   %b = load <32 x i32>, ptr %y
@@ -801,6 +807,7 @@ define <2 x i64> @vwmulsu_vx_v2i64_i8(ptr %x, ptr %y) {
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwmulsu_vx_v2i64_i8:
@@ -835,6 +842,7 @@ define <2 x i64> @vwmulsu_vx_v2i64_i16(ptr %x, ptr %y) {
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwmulsu_vx_v2i64_i16:
@@ -869,6 +877,7 @@ define <2 x i64> @vwmulsu_vx_v2i64_i32(ptr %x, ptr %y) {
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwmulsu_vx_v2i64_i32:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index 17a76ae5e7f75e..007b561a2247ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -282,7 +282,9 @@ define <128 x i16> @vwmulu_v128i16(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, ptr %x
   %b = load <128 x i8>, ptr %y
@@ -329,7 +331,9 @@ define <64 x i32> @vwmulu_v64i32(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <64 x i16>, ptr %x
   %b = load <64 x i16>, ptr %y
@@ -375,7 +379,9 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = load <32 x i32>, ptr %x
   %b = load <32 x i32>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
index 16487a0784125e..374d3a797af423 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
@@ -1181,6 +1181,7 @@ define <2 x i64> @vxor_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v2i64:
@@ -1207,6 +1208,7 @@ define <2 x i64> @vxor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v2i64_unmasked:
@@ -1295,6 +1297,7 @@ define <4 x i64> @vxor_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v4i64:
@@ -1321,6 +1324,7 @@ define <4 x i64> @vxor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v4i64_unmasked:
@@ -1409,6 +1413,7 @@ define <8 x i64> @vxor_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v8i64:
@@ -1435,6 +1440,7 @@ define <8 x i64> @vxor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v8i64_unmasked:
@@ -1523,6 +1529,7 @@ define <16 x i64> @vxor_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v16i64:
@@ -1549,6 +1556,7 @@ define <16 x i64> @vxor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_v16i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index 03d1fb6c8d297f..b4ff60117c51f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -343,7 +343,9 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.floor.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -411,7 +413,9 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.floor.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -930,7 +934,9 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1013,7 +1019,9 @@ define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1513,7 +1521,9 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index d8c3ab27cfad12..d9f77d0bccd717 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -133,7 +133,9 @@ define <vscale x 16 x bfloat> @vfmax_nxv16bf16_vv(<vscale x 16 x bfloat> %a, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.maximum.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b)
   ret <vscale x 16 x bfloat> %v
@@ -367,7 +369,9 @@ define <vscale x 16 x half> @vfmax_nxv16f16_vv(<vscale x 16 x half> %a, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.maximum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
   ret <vscale x 16 x half> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index dd01e1c1ee66d0..6e3a6fa1d09768 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -245,7 +245,9 @@ define <vscale x 16 x bfloat> @vfmax_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.maximum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -280,7 +282,9 @@ define <vscale x 16 x bfloat> @vfmax_vv_nxv16bf16_unmasked(<vscale x 16 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.maximum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -429,7 +433,9 @@ define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    slli a1, a0, 5
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.maximum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -531,7 +537,9 @@ define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.maximum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -880,7 +888,9 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -926,7 +936,9 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
@@ -960,7 +972,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFH-NEXT:    csrr a0, vlenb
 ; ZVFH-NEXT:    slli a0, a0, 3
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_nxv32f16:
@@ -1103,7 +1117,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    slli a1, a0, 5
 ; ZVFHMIN-NEXT:    add a0, a1, a0
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.maximum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1216,7 +1232,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.maximum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1496,7 +1514,9 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x double> @llvm.vp.maximum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
@@ -1686,7 +1706,9 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    li a1, 35
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.maximum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -1757,7 +1779,9 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.maximum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index 2371840002f40b..2a045ad54f8113 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -133,7 +133,9 @@ define <vscale x 16 x bfloat> @vfmin_nxv16bf16_vv(<vscale x 16 x bfloat> %a, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.minimum.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b)
   ret <vscale x 16 x bfloat> %v
@@ -367,7 +369,9 @@ define <vscale x 16 x half> @vfmin_nxv16f16_vv(<vscale x 16 x half> %a, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.minimum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
   ret <vscale x 16 x half> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 85cac8d1870594..bcd4c4ce619c44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -245,7 +245,9 @@ define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.minimum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -280,7 +282,9 @@ define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16_unmasked(<vscale x 16 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.minimum.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -429,7 +433,9 @@ define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    slli a1, a0, 5
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.minimum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -531,7 +537,9 @@ define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.minimum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -880,7 +888,9 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -926,7 +936,9 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
@@ -960,7 +972,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFH-NEXT:    csrr a0, vlenb
 ; ZVFH-NEXT:    slli a0, a0, 3
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_nxv32f16:
@@ -1103,7 +1117,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    slli a1, a0, 5
 ; ZVFHMIN-NEXT:    add a0, a1, a0
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1216,7 +1232,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1496,7 +1514,9 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
@@ -1686,7 +1706,9 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    li a1, 35
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -1757,7 +1779,9 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
index 9498c65ba9a176..2a5af0fc9ef397 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
@@ -171,7 +171,9 @@ define <vscale x 32 x bfloat> @nearbyint_nxv32bf16(<vscale x 32 x bfloat> %x) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = call <vscale x 32 x bfloat> @llvm.nearbyint.nxv32bf16(<vscale x 32 x bfloat> %x)
   ret <vscale x 32 x bfloat> %a
@@ -431,7 +433,9 @@ define <vscale x 32 x half> @nearbyint_nxv32f16(<vscale x 32 x half> %x) {
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %a = call <vscale x 32 x half> @llvm.nearbyint.nxv32f16(<vscale x 32 x half> %x)
   ret <vscale x 32 x half> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index 5a880105f68379..6cf449434a22b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -392,7 +392,16 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
 ; CHECK-NOV-NEXT:    addi sp, sp, 64
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB6_10: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a4
@@ -486,11 +495,17 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    slli a1, a0, 1
 ; CHECK-V-NEXT:    add a0, a1, a0
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 48
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <4 x half> %x to <4 x i64>
@@ -567,7 +582,16 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
 ; CHECK-NOV-NEXT:    addi sp, sp, 64
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB7_6: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a1
@@ -649,11 +673,17 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    slli a1, a0, 1
 ; CHECK-V-NEXT:    add a0, a1, a0
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 48
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <4 x half> %x to <4 x i64>
@@ -740,7 +770,16 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
 ; CHECK-NOV-NEXT:    addi sp, sp, 64
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB8_6: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a2
@@ -823,11 +862,17 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    slli a1, a0, 1
 ; CHECK-V-NEXT:    add a0, a1, a0
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 48
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <4 x half> %x to <4 x i64>
@@ -1297,7 +1342,24 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore s4
+; CHECK-NOV-NEXT:    .cfi_restore s5
+; CHECK-NOV-NEXT:    .cfi_restore s6
+; CHECK-NOV-NEXT:    .cfi_restore s7
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
+; CHECK-NOV-NEXT:    .cfi_restore fs3
+; CHECK-NOV-NEXT:    .cfi_restore fs4
+; CHECK-NOV-NEXT:    .cfi_restore fs5
+; CHECK-NOV-NEXT:    .cfi_restore fs6
 ; CHECK-NOV-NEXT:    addi sp, sp, 128
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB15_18: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a7
@@ -1494,6 +1556,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 2
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
@@ -1502,7 +1565,16 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
+; CHECK-V-NEXT:    .cfi_restore s3
+; CHECK-V-NEXT:    .cfi_restore s4
+; CHECK-V-NEXT:    .cfi_restore s5
+; CHECK-V-NEXT:    .cfi_restore s6
 ; CHECK-V-NEXT:    addi sp, sp, 80
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <8 x half> %x to <8 x i32>
@@ -1635,7 +1707,24 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore s4
+; CHECK-NOV-NEXT:    .cfi_restore s5
+; CHECK-NOV-NEXT:    .cfi_restore s6
+; CHECK-NOV-NEXT:    .cfi_restore s7
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
+; CHECK-NOV-NEXT:    .cfi_restore fs3
+; CHECK-NOV-NEXT:    .cfi_restore fs4
+; CHECK-NOV-NEXT:    .cfi_restore fs5
+; CHECK-NOV-NEXT:    .cfi_restore fs6
 ; CHECK-NOV-NEXT:    addi sp, sp, 128
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB16_10: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a1
@@ -1807,6 +1896,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 2
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
@@ -1815,7 +1905,16 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
+; CHECK-V-NEXT:    .cfi_restore s3
+; CHECK-V-NEXT:    .cfi_restore s4
+; CHECK-V-NEXT:    .cfi_restore s5
+; CHECK-V-NEXT:    .cfi_restore s6
 ; CHECK-V-NEXT:    addi sp, sp, 80
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <8 x half> %x to <8 x i32>
@@ -1970,7 +2069,24 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore s4
+; CHECK-NOV-NEXT:    .cfi_restore s5
+; CHECK-NOV-NEXT:    .cfi_restore s6
+; CHECK-NOV-NEXT:    .cfi_restore s7
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
+; CHECK-NOV-NEXT:    .cfi_restore fs3
+; CHECK-NOV-NEXT:    .cfi_restore fs4
+; CHECK-NOV-NEXT:    .cfi_restore fs5
+; CHECK-NOV-NEXT:    .cfi_restore fs6
 ; CHECK-NOV-NEXT:    addi sp, sp, 128
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB17_10: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a3
@@ -2143,6 +2259,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 2
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
@@ -2151,7 +2268,16 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
+; CHECK-V-NEXT:    .cfi_restore s3
+; CHECK-V-NEXT:    .cfi_restore s4
+; CHECK-V-NEXT:    .cfi_restore s5
+; CHECK-V-NEXT:    .cfi_restore s6
 ; CHECK-V-NEXT:    addi sp, sp, 80
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <8 x half> %x to <8 x i32>
@@ -2242,7 +2368,12 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: stest_f64i64:
@@ -2325,10 +2456,15 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x double> %x to <2 x i128>
@@ -2370,7 +2506,12 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: utest_f64i64:
@@ -2408,10 +2549,15 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <2 x double> %x to <2 x i128>
@@ -2477,7 +2623,12 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: ustest_f64i64:
@@ -2542,10 +2693,15 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x double> %x to <2 x i128>
@@ -2634,7 +2790,12 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: stest_f32i64:
@@ -2717,10 +2878,15 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x float> %x to <2 x i128>
@@ -2762,7 +2928,12 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: utest_f32i64:
@@ -2800,10 +2971,15 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <2 x float> %x to <2 x i128>
@@ -2869,7 +3045,12 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: ustest_f32i64:
@@ -2934,10 +3115,15 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x float> %x to <2 x i128>
@@ -3029,7 +3215,12 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: stest_f16i64:
@@ -3112,7 +3303,12 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 32
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x half> %x to <2 x i128>
@@ -3157,7 +3353,12 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: utesth_f16i64:
@@ -3195,7 +3396,12 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 32
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <2 x half> %x to <2 x i128>
@@ -3264,7 +3470,12 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: ustest_f16i64:
@@ -3329,7 +3540,12 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 32
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x half> %x to <2 x i128>
@@ -3721,7 +3937,16 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
 ; CHECK-NOV-NEXT:    addi sp, sp, 64
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB33_10: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a4
@@ -3815,11 +4040,17 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    slli a1, a0, 1
 ; CHECK-V-NEXT:    add a0, a1, a0
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 48
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <4 x half> %x to <4 x i64>
@@ -3894,7 +4125,16 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
 ; CHECK-NOV-NEXT:    addi sp, sp, 64
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB34_6: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a1
@@ -3976,11 +4216,17 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    slli a1, a0, 1
 ; CHECK-V-NEXT:    add a0, a1, a0
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 48
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <4 x half> %x to <4 x i64>
@@ -4066,7 +4312,16 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
 ; CHECK-NOV-NEXT:    addi sp, sp, 64
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB35_6: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a2
@@ -4149,11 +4404,17 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    slli a1, a0, 1
 ; CHECK-V-NEXT:    add a0, a1, a0
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 48
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <4 x half> %x to <4 x i64>
@@ -4611,7 +4872,24 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore s4
+; CHECK-NOV-NEXT:    .cfi_restore s5
+; CHECK-NOV-NEXT:    .cfi_restore s6
+; CHECK-NOV-NEXT:    .cfi_restore s7
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
+; CHECK-NOV-NEXT:    .cfi_restore fs3
+; CHECK-NOV-NEXT:    .cfi_restore fs4
+; CHECK-NOV-NEXT:    .cfi_restore fs5
+; CHECK-NOV-NEXT:    .cfi_restore fs6
 ; CHECK-NOV-NEXT:    addi sp, sp, 128
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB42_18: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a7
@@ -4808,6 +5086,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 2
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
@@ -4816,7 +5095,16 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
+; CHECK-V-NEXT:    .cfi_restore s3
+; CHECK-V-NEXT:    .cfi_restore s4
+; CHECK-V-NEXT:    .cfi_restore s5
+; CHECK-V-NEXT:    .cfi_restore s6
 ; CHECK-V-NEXT:    addi sp, sp, 80
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <8 x half> %x to <8 x i32>
@@ -4947,7 +5235,24 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore s4
+; CHECK-NOV-NEXT:    .cfi_restore s5
+; CHECK-NOV-NEXT:    .cfi_restore s6
+; CHECK-NOV-NEXT:    .cfi_restore s7
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
+; CHECK-NOV-NEXT:    .cfi_restore fs3
+; CHECK-NOV-NEXT:    .cfi_restore fs4
+; CHECK-NOV-NEXT:    .cfi_restore fs5
+; CHECK-NOV-NEXT:    .cfi_restore fs6
 ; CHECK-NOV-NEXT:    addi sp, sp, 128
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB43_10: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a1
@@ -5119,6 +5424,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 2
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
@@ -5127,7 +5433,16 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
+; CHECK-V-NEXT:    .cfi_restore s3
+; CHECK-V-NEXT:    .cfi_restore s4
+; CHECK-V-NEXT:    .cfi_restore s5
+; CHECK-V-NEXT:    .cfi_restore s6
 ; CHECK-V-NEXT:    addi sp, sp, 80
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <8 x half> %x to <8 x i32>
@@ -5281,7 +5596,24 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
+; CHECK-NOV-NEXT:    .cfi_restore s3
+; CHECK-NOV-NEXT:    .cfi_restore s4
+; CHECK-NOV-NEXT:    .cfi_restore s5
+; CHECK-NOV-NEXT:    .cfi_restore s6
+; CHECK-NOV-NEXT:    .cfi_restore s7
+; CHECK-NOV-NEXT:    .cfi_restore fs0
+; CHECK-NOV-NEXT:    .cfi_restore fs1
+; CHECK-NOV-NEXT:    .cfi_restore fs2
+; CHECK-NOV-NEXT:    .cfi_restore fs3
+; CHECK-NOV-NEXT:    .cfi_restore fs4
+; CHECK-NOV-NEXT:    .cfi_restore fs5
+; CHECK-NOV-NEXT:    .cfi_restore fs6
 ; CHECK-NOV-NEXT:    addi sp, sp, 128
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB44_10: # %entry
 ; CHECK-NOV-NEXT:    mv a0, a3
@@ -5454,6 +5786,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 2
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
@@ -5462,7 +5795,16 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
+; CHECK-V-NEXT:    .cfi_restore s3
+; CHECK-V-NEXT:    .cfi_restore s4
+; CHECK-V-NEXT:    .cfi_restore s5
+; CHECK-V-NEXT:    .cfi_restore s6
 ; CHECK-V-NEXT:    addi sp, sp, 80
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <8 x half> %x to <8 x i32>
@@ -5554,7 +5896,12 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: stest_f64i64_mm:
@@ -5640,10 +5987,15 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x double> %x to <2 x i128>
@@ -5683,7 +6035,12 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: utest_f64i64_mm:
@@ -5724,10 +6081,15 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <2 x double> %x to <2 x i128>
@@ -5781,7 +6143,12 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: ustest_f64i64_mm:
@@ -5835,10 +6202,15 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x double> %x to <2 x i128>
@@ -5928,7 +6300,12 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: stest_f32i64_mm:
@@ -6014,10 +6391,15 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x float> %x to <2 x i128>
@@ -6057,7 +6439,12 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: utest_f32i64_mm:
@@ -6098,10 +6485,15 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <2 x float> %x to <2 x i128>
@@ -6155,7 +6547,12 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore fs0
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: ustest_f32i64_mm:
@@ -6209,10 +6606,15 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    add sp, sp, a0
+; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
 ; CHECK-V-NEXT:    addi sp, sp, 64
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x float> %x to <2 x i128>
@@ -6305,7 +6707,12 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: stest_f16i64_mm:
@@ -6391,7 +6798,12 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 32
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x half> %x to <2 x i128>
@@ -6433,7 +6845,12 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: utesth_f16i64_mm:
@@ -6471,7 +6888,12 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 32
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptoui <2 x half> %x to <2 x i128>
@@ -6528,7 +6950,12 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT:    .cfi_restore ra
+; CHECK-NOV-NEXT:    .cfi_restore s0
+; CHECK-NOV-NEXT:    .cfi_restore s1
+; CHECK-NOV-NEXT:    .cfi_restore s2
 ; CHECK-NOV-NEXT:    addi sp, sp, 32
+; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ;
 ; CHECK-V-LABEL: ustest_f16i64_mm:
@@ -6582,7 +7009,12 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT:    .cfi_restore ra
+; CHECK-V-NEXT:    .cfi_restore s0
+; CHECK-V-NEXT:    .cfi_restore s1
+; CHECK-V-NEXT:    .cfi_restore s2
 ; CHECK-V-NEXT:    addi sp, sp, 32
+; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-V-NEXT:    ret
 entry:
   %conv = fptosi <2 x half> %x to <2 x i128>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index 2c5a3dfffc2cfc..cf35e9c40b8a73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -231,7 +231,9 @@ define <vscale x 64 x i8> @fshr_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vp.fshr.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %res
@@ -262,7 +264,9 @@ define <vscale x 64 x i8> @fshl_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vp.fshl.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %res
@@ -463,7 +467,9 @@ define <vscale x 32 x i16> @fshr_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vp.fshr.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %res
@@ -494,7 +500,9 @@ define <vscale x 32 x i16> @fshl_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vp.fshl.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %res
@@ -671,7 +679,9 @@ define <vscale x 16 x i32> @fshr_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vp.fshr.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %res
@@ -705,7 +715,9 @@ define <vscale x 16 x i32> @fshl_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vp.fshl.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %res
@@ -846,7 +858,9 @@ define <vscale x 7 x i64> @fshr_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 7 x i64> @llvm.vp.fshr.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x i64> %res
@@ -880,7 +894,9 @@ define <vscale x 7 x i64> @fshl_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 7 x i64> @llvm.vp.fshl.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x i64> %res
@@ -913,7 +929,9 @@ define <vscale x 8 x i64> @fshr_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vp.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %res
@@ -947,7 +965,9 @@ define <vscale x 8 x i64> @fshl_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vp.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %res
@@ -1114,7 +1134,9 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT:    li a1, 48
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i64> @llvm.vp.fshr.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i64> %res
@@ -1270,7 +1292,9 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT:    li a1, 40
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i64> @llvm.vp.fshl.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i64> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
index 3b41e92f437309..8f6276fcf97b75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
@@ -36,6 +36,7 @@ body: |
   ; CHECK-NEXT:   $x10 = frame-destroy PseudoReadVLENB
   ; CHECK-NEXT:   $x10 = frame-destroy SLLI killed $x10, 1
   ; CHECK-NEXT:   $x2 = frame-destroy ADD $x2, killed $x10
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa $x2, 0
   ; CHECK-NEXT:   PseudoRET
   bb.0:
   bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
index 04367028b12d82..7990c1c1eabc24 100644
--- a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
@@ -26,7 +26,9 @@ define i32 @vscale_known_nonzero() {
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    lbu a0, 0(a0)
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %x = call i32 @llvm.vscale()
   %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
index 22a7425bf98b8e..559362e6d6274b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -34,9 +34,13 @@
   ; CHECK-NEXT:    ld a0, 8(sp)
   ; CHECK-NEXT:    call spillslot
   ; CHECK-NEXT:    addi sp, s0, -2032
+  ; CHECK-NEXT:    .cfi_def_cfa sp, 2032
   ; CHECK-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
   ; CHECK-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+  ; CHECK-NEXT:    .cfi_restore ra
+  ; CHECK-NEXT:    .cfi_restore s0
   ; CHECK-NEXT:    addi sp, sp, 2032
+  ; CHECK-NEXT:    .cfi_def_cfa_offset 0
   ; CHECK-NEXT:    ret
     ret void
   }
diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
index 90bf29d7760112..ad8fde013ce084 100644
--- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -21,7 +21,9 @@ define void @local_var_mf8() {
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    slli a0, a0, 1
 ; RV64IV-NEXT:    add sp, sp, a0
+; RV64IV-NEXT:    .cfi_def_cfa sp, 16
 ; RV64IV-NEXT:    addi sp, sp, 16
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 1 x i8>
   %local1 = alloca <vscale x 1 x i8>
@@ -48,7 +50,9 @@ define void @local_var_m1() {
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    slli a0, a0, 1
 ; RV64IV-NEXT:    add sp, sp, a0
+; RV64IV-NEXT:    .cfi_def_cfa sp, 16
 ; RV64IV-NEXT:    addi sp, sp, 16
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 8 x i8>
   %local1 = alloca <vscale x 8 x i8>
@@ -76,7 +80,9 @@ define void @local_var_m2() {
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    slli a0, a0, 2
 ; RV64IV-NEXT:    add sp, sp, a0
+; RV64IV-NEXT:    .cfi_def_cfa sp, 16
 ; RV64IV-NEXT:    addi sp, sp, 16
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 16 x i8>
   %local1 = alloca <vscale x 16 x i8>
@@ -108,9 +114,13 @@ define void @local_var_m4() {
 ; RV64IV-NEXT:    addi a0, sp, 32
 ; RV64IV-NEXT:    vl4r.v v8, (a0)
 ; RV64IV-NEXT:    addi sp, s0, -48
+; RV64IV-NEXT:    .cfi_def_cfa sp, 48
 ; RV64IV-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64IV-NEXT:    .cfi_restore ra
+; RV64IV-NEXT:    .cfi_restore s0
 ; RV64IV-NEXT:    addi sp, sp, 48
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 32 x i8>
   %local1 = alloca <vscale x 32 x i8>
@@ -142,9 +152,13 @@ define void @local_var_m8() {
 ; RV64IV-NEXT:    addi a0, sp, 64
 ; RV64IV-NEXT:    vl8r.v v8, (a0)
 ; RV64IV-NEXT:    addi sp, s0, -80
+; RV64IV-NEXT:    .cfi_def_cfa sp, 80
 ; RV64IV-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64IV-NEXT:    .cfi_restore ra
+; RV64IV-NEXT:    .cfi_restore s0
 ; RV64IV-NEXT:    addi sp, sp, 80
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 64 x i8>
   %local1 = alloca <vscale x 64 x i8>
@@ -174,7 +188,9 @@ define void @local_var_m2_mix_local_scalar() {
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    slli a0, a0, 2
 ; RV64IV-NEXT:    add sp, sp, a0
+; RV64IV-NEXT:    .cfi_def_cfa sp, 16
 ; RV64IV-NEXT:    addi sp, sp, 16
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local_scalar0 = alloca i32
   %local0 = alloca <vscale x 16 x i8>
@@ -223,10 +239,15 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
 ; RV64IV-NEXT:    addi a0, a0, -32
 ; RV64IV-NEXT:    vl2r.v v8, (a0)
 ; RV64IV-NEXT:    addi sp, s0, -32
+; RV64IV-NEXT:    .cfi_def_cfa sp, 32
 ; RV64IV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IV-NEXT:    .cfi_restore ra
+; RV64IV-NEXT:    .cfi_restore s0
+; RV64IV-NEXT:    .cfi_restore s1
 ; RV64IV-NEXT:    addi sp, sp, 32
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %1 = alloca i8, i64 %n
   %2 = alloca <vscale x 16 x i8>
@@ -277,11 +298,17 @@ define void @local_var_m2_with_bp(i64 %n) {
 ; RV64IV-NEXT:    vl2r.v v8, (a0)
 ; RV64IV-NEXT:    lw zero, 120(s1)
 ; RV64IV-NEXT:    addi sp, s0, -256
+; RV64IV-NEXT:    .cfi_def_cfa sp, 256
 ; RV64IV-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s1, 232(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s2, 224(sp) # 8-byte Folded Reload
+; RV64IV-NEXT:    .cfi_restore ra
+; RV64IV-NEXT:    .cfi_restore s0
+; RV64IV-NEXT:    .cfi_restore s1
+; RV64IV-NEXT:    .cfi_restore s2
 ; RV64IV-NEXT:    addi sp, sp, 256
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %1 = alloca i8, i64 %n
   %2 = alloca i32, align 128
diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
index 87dd0048d928a9..8190a82d7035b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -60,9 +60,13 @@ define <vscale x 64 x i8> @caller() {
 ; RV64IV-NEXT:    vs8r.v v24, (a1)
 ; RV64IV-NEXT:    call callee
 ; RV64IV-NEXT:    addi sp, s0, -80
+; RV64IV-NEXT:    .cfi_def_cfa sp, 80
 ; RV64IV-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64IV-NEXT:    .cfi_restore ra
+; RV64IV-NEXT:    .cfi_restore s0
 ; RV64IV-NEXT:    addi sp, sp, 80
+; RV64IV-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IV-NEXT:    ret
   %local0 = alloca <vscale x 64 x i8>
   %local1 = alloca <vscale x 64 x i8>
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 189ba08dddc7aa..dcb93c47ee2739 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1252,7 +1252,9 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
   %p1 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> %p0, <vscale x 8 x ptr> %ptrs1, i64 8)
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 29db67b4b0a41f..885403484087af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -1925,7 +1925,9 @@ define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
   %p1 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> %p0, <vscale x 8 x ptr> %ptrs1, i64 8)
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index 9d0cb22eb5f475..7950d7a95c4c97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -1879,9 +1879,13 @@ define <vscale x 12 x i64> @reverse_nxv12i64(<vscale x 12 x i64> %a) {
 ; RV32-NEXT:    vl8re64.v v16, (a0)
 ; RV32-NEXT:    vl8re64.v v8, (a1)
 ; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    .cfi_def_cfa sp, 80
 ; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: reverse_nxv12i64:
@@ -1924,9 +1928,13 @@ define <vscale x 12 x i64> @reverse_nxv12i64(<vscale x 12 x i64> %a) {
 ; RV64-NEXT:    vl8re64.v v16, (a0)
 ; RV64-NEXT:    vl8re64.v v8, (a1)
 ; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    .cfi_def_cfa sp, 80
 ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %res = call <vscale x 12 x i64> @llvm.vector.reverse.nxv12i64(<vscale x 12 x i64> %a)
   ret <vscale x 12 x i64> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 5aa773b01e6926..5f3152bfd3ed86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -335,7 +335,9 @@ define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16(<vscale x 32 x bfloat> %va
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.nearbyint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -403,7 +405,9 @@ define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16_unmasked(<vscale x 32 x bf
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.nearbyint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -914,7 +918,9 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -997,7 +1003,9 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
index 9cdced88c7bdc2..1ff30cd776e648 100644
--- a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
@@ -41,10 +41,15 @@ define signext i32 @foo(i32 signext %aa) #0 {
 ; CHECK-NEXT:    addi sp, sp, 32
 ; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    addi sp, s0, -96
+; CHECK-NEXT:    .cfi_def_cfa sp, 96
 ; CHECK-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 80(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
 ; CHECK-NEXT:    addi sp, sp, 96
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %aa.addr = alloca i32, align 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr104480.ll b/llvm/test/CodeGen/RISCV/rvv/pr104480.ll
index 93cf4d3766089a..16a848d5da9884 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr104480.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr104480.ll
@@ -59,6 +59,7 @@ define <vscale x 4 x i64> @test_mulhs_expand(<vscale x 4 x i64> %broadcast.splat
 ; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %broadcast.splat = shufflevector <vscale x 4 x i64> %broadcast.splatinsert, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i32> zeroinitializer
@@ -85,6 +86,7 @@ define <vscale x 4 x i64> @test_mulhu_expand(<vscale x 4 x i64> %broadcast.splat
 ; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %broadcast.splat = shufflevector <vscale x 4 x i64> %broadcast.splatinsert, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
index b6e0d1e2ff4ae7..e8a8d9e422ac18 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
@@ -37,9 +37,13 @@ define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
 ; CHECK-NEXT:    vs8r.v v8, (a2)
 ; CHECK-NEXT:    lbu a0, 0(a0)
 ; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %a = bitcast <vscale x 16 x i8> %x to <vscale x 128 x i1>
   %b = extractelement <vscale x 128 x i1> %a, i64 %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr93587.ll b/llvm/test/CodeGen/RISCV/rvv/pr93587.ll
index 1c2923a2de8937..c2998bf20fa0af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr93587.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr93587.ll
@@ -22,6 +22,7 @@ define i16 @f() {
 ; CHECK-NEXT:  .LBB0_3: # %BB2
 ; CHECK-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 BB:
   br label %BB1
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
index 58fd0de31402df..fd3c4593462bf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
@@ -166,7 +166,21 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
+; CHECK-NEXT:    .cfi_restore s2
+; CHECK-NEXT:    .cfi_restore s3
+; CHECK-NEXT:    .cfi_restore s4
+; CHECK-NEXT:    .cfi_restore s5
+; CHECK-NEXT:    .cfi_restore s6
+; CHECK-NEXT:    .cfi_restore s7
+; CHECK-NEXT:    .cfi_restore s8
+; CHECK-NEXT:    .cfi_restore s9
+; CHECK-NEXT:    .cfi_restore s10
+; CHECK-NEXT:    .cfi_restore s11
 ; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
index 600ac594380b39..142fc2b867173a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
@@ -47,11 +47,17 @@ define void @foo(ptr nocapture noundef %p1) {
 ; CHECK-NEXT:    vfadd.vv v8, v10, v8
 ; CHECK-NEXT:    vse32.v v8, (s2)
 ; CHECK-NEXT:    addi sp, s0, -192
+; CHECK-NEXT:    .cfi_def_cfa sp, 192
 ; CHECK-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 168(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s2, 160(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
+; CHECK-NEXT:    .cfi_restore s2
 ; CHECK-NEXT:    addi sp, sp, 192
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %vla = alloca [10 x i32], align 64
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index 4f58ccb5188d31..4a22150ce02d60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -46,7 +46,9 @@ define void @vid(ptr %p) {
 ; PRERA-NEXT:    csrr a0, vlenb
 ; PRERA-NEXT:    slli a0, a0, 3
 ; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    .cfi_def_cfa sp, 16
 ; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    .cfi_def_cfa_offset 0
 ; PRERA-NEXT:    ret
   %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> poison, i64 -1)
   store volatile <vscale x 8 x i64> %vid, ptr %p
@@ -92,7 +94,9 @@ define void @vid_passthru(ptr %p, <vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> %v, i64 1)
   store volatile <vscale x 8 x i64> %vid, ptr %p
@@ -154,7 +158,9 @@ define void @vmv.v.i(ptr %p) {
 ; PRERA-NEXT:    csrr a0, vlenb
 ; PRERA-NEXT:    slli a0, a0, 3
 ; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    .cfi_def_cfa sp, 16
 ; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    .cfi_def_cfa_offset 0
 ; PRERA-NEXT:    ret
   %vmv.v.i = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 1, i64 -1)
   store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p
@@ -201,7 +207,9 @@ define void @vmv.v.x_needs_extended(ptr %p, i64 %x) {
 ; POSTRA-NEXT:    csrr a0, vlenb
 ; POSTRA-NEXT:    slli a0, a0, 3
 ; POSTRA-NEXT:    add sp, sp, a0
+; POSTRA-NEXT:    .cfi_def_cfa sp, 16
 ; POSTRA-NEXT:    addi sp, sp, 16
+; POSTRA-NEXT:    .cfi_def_cfa_offset 0
 ; POSTRA-NEXT:    ret
 ;
 ; PRERA-LABEL: vmv.v.x_needs_extended:
@@ -230,7 +238,9 @@ define void @vmv.v.x_needs_extended(ptr %p, i64 %x) {
 ; PRERA-NEXT:    csrr a0, vlenb
 ; PRERA-NEXT:    slli a0, a0, 3
 ; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    .cfi_def_cfa sp, 16
 ; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    .cfi_def_cfa_offset 0
 ; PRERA-NEXT:    ret
   %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
   store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
@@ -294,7 +304,9 @@ define void @vmv.v.x_live(ptr %p, i64 %x) {
 ; PRERA-NEXT:    csrr a0, vlenb
 ; PRERA-NEXT:    slli a0, a0, 3
 ; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    .cfi_def_cfa sp, 16
 ; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    .cfi_def_cfa_offset 0
 ; PRERA-NEXT:    ret
   %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
   store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
@@ -359,7 +371,9 @@ define void @vfmv.v.f(ptr %p, double %x) {
 ; PRERA-NEXT:    csrr a0, vlenb
 ; PRERA-NEXT:    slli a0, a0, 3
 ; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    .cfi_def_cfa sp, 16
 ; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    .cfi_def_cfa_offset 0
 ; PRERA-NEXT:    ret
   %vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
   store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
@@ -424,7 +438,9 @@ define void @vmv.s.x(ptr %p, i64 %x) {
 ; PRERA-NEXT:    csrr a0, vlenb
 ; PRERA-NEXT:    slli a0, a0, 3
 ; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    .cfi_def_cfa sp, 16
 ; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    .cfi_def_cfa_offset 0
 ; PRERA-NEXT:    ret
   %vmv.s.x = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
   store volatile <vscale x 8 x i64> %vmv.s.x, ptr %p
@@ -489,7 +505,9 @@ define void @vfmv.s.f(ptr %p, double %x) {
 ; PRERA-NEXT:    csrr a0, vlenb
 ; PRERA-NEXT:    slli a0, a0, 3
 ; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    .cfi_def_cfa sp, 16
 ; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    .cfi_def_cfa_offset 0
 ; PRERA-NEXT:    ret
   %vfmv.s.f = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
   store volatile <vscale x 8 x double> %vfmv.s.f, ptr %p
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index a454f9dd97cebf..6a85f5f4dc80b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -316,7 +316,9 @@ define <vscale x 32 x bfloat> @vp_rint_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.rint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -380,7 +382,9 @@ define <vscale x 32 x bfloat> @vp_rint_nxv32bf16_unmasked(<vscale x 32 x bfloat>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.rint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -850,7 +854,9 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -927,7 +933,9 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1382,7 +1390,9 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index a4936483e8a152..e06991ae65f55a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -343,7 +343,9 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.round.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -411,7 +413,9 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.round.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -930,7 +934,9 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1013,7 +1019,9 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1513,7 +1521,9 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 9857009002eb90..accb48619e4769 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -343,7 +343,9 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16(<vscale x 32 x bfloat> %va
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundeven.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -411,7 +413,9 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16_unmasked(<vscale x 32 x bf
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundeven.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -930,7 +934,9 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1013,7 +1019,9 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1513,7 +1521,9 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 11830c924867b4..928a96a63dc753 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -343,7 +343,9 @@ define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16(<vscale x 32 x bfloat> %
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundtozero.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -411,7 +413,9 @@ define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16_unmasked(<vscale x 32 x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundtozero.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -930,7 +934,9 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1013,7 +1019,9 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1513,7 +1521,9 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
index c164b62a679be0..f65aa8bb9da680 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -51,10 +51,15 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
 ; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    addi sp, s0, -96
+; CHECK-NEXT:    .cfi_def_cfa sp, 96
 ; CHECK-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 80(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
 ; CHECK-NEXT:    addi sp, sp, 96
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %ret = call <vscale x 16 x i32> @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x)
   ret <vscale x 16 x i32> %ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 5ba4efa8458c79..35b8ecc5d961d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -1654,7 +1654,9 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 64 x i1> @llvm.vp.fcmp.nxv64bf16(<vscale x 64 x bfloat> %va, <vscale x 64 x bfloat> %vb, metadata !"oeq", <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i1> %v
@@ -3781,7 +3783,9 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
 ; ZVFH-NEXT:    csrr a0, vlenb
 ; ZVFH-NEXT:    slli a0, a0, 4
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: fcmp_oeq_vv_nxv64f16:
@@ -3973,7 +3977,9 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 64 x i1> @llvm.vp.fcmp.nxv64f16(<vscale x 64 x half> %va, <vscale x 64 x half> %vb, metadata !"oeq", <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i1> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index eb8c58d2d37790..6a3cdb6cfd1dd2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1132,7 +1132,9 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 128 x i1> @llvm.vp.icmp.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, metadata !"eq", <vscale x 128 x i1> %m, i32 %evl)
   ret <vscale x 128 x i1> %v
@@ -2286,7 +2288,9 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x i1> @llvm.vp.icmp.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, metadata !"eq", <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i1> %v
@@ -2383,6 +2387,7 @@ define <vscale x 1 x i1> @icmp_eq_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmseq.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_vx_nxv1i64:
@@ -2409,6 +2414,7 @@ define <vscale x 1 x i1> @icmp_eq_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %b
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmseq.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_vx_swap_nxv1i64:
@@ -2465,6 +2471,7 @@ define <vscale x 1 x i1> @icmp_ne_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsne.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_vx_nxv1i64:
@@ -2491,6 +2498,7 @@ define <vscale x 1 x i1> @icmp_ne_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %b
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsne.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_vx_swap_nxv1i64:
@@ -2547,6 +2555,7 @@ define <vscale x 1 x i1> @icmp_ugt_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsltu.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_vx_nxv1i64:
@@ -2573,6 +2582,7 @@ define <vscale x 1 x i1> @icmp_ugt_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsltu.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_vx_swap_nxv1i64:
@@ -2629,6 +2639,7 @@ define <vscale x 1 x i1> @icmp_uge_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsleu.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_vx_nxv1i64:
@@ -2657,6 +2668,7 @@ define <vscale x 1 x i1> @icmp_uge_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsleu.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_vx_swap_nxv1i64:
@@ -2713,6 +2725,7 @@ define <vscale x 1 x i1> @icmp_ult_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsltu.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_vx_nxv1i64:
@@ -2739,6 +2752,7 @@ define <vscale x 1 x i1> @icmp_ult_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsltu.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_vx_swap_nxv1i64:
@@ -2795,6 +2809,7 @@ define <vscale x 1 x i1> @icmp_sgt_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmslt.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_vx_nxv1i64:
@@ -2821,6 +2836,7 @@ define <vscale x 1 x i1> @icmp_sgt_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmslt.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_vx_swap_nxv1i64:
@@ -2877,6 +2893,7 @@ define <vscale x 1 x i1> @icmp_sge_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_vx_nxv1i64:
@@ -2905,6 +2922,7 @@ define <vscale x 1 x i1> @icmp_sge_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsle.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_vx_swap_nxv1i64:
@@ -2961,6 +2979,7 @@ define <vscale x 1 x i1> @icmp_slt_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmslt.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_vx_nxv1i64:
@@ -2987,6 +3006,7 @@ define <vscale x 1 x i1> @icmp_slt_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmslt.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_vx_swap_nxv1i64:
@@ -3043,6 +3063,7 @@ define <vscale x 1 x i1> @icmp_sle_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsle.vv v0, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_vx_nxv1i64:
@@ -3069,6 +3090,7 @@ define <vscale x 1 x i1> @icmp_sle_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_vx_swap_nxv1i64:
@@ -3131,6 +3153,7 @@ define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vmseq.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_vx_nxv8i64:
@@ -3159,6 +3182,7 @@ define <vscale x 8 x i1> @icmp_eq_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %b
 ; RV32-NEXT:    vmseq.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_vx_swap_nxv8i64:
@@ -3220,6 +3244,7 @@ define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vmsne.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_vx_nxv8i64:
@@ -3248,6 +3273,7 @@ define <vscale x 8 x i1> @icmp_ne_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %b
 ; RV32-NEXT:    vmsne.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_vx_swap_nxv8i64:
@@ -3309,6 +3335,7 @@ define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vmsltu.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_vx_nxv8i64:
@@ -3337,6 +3364,7 @@ define <vscale x 8 x i1> @icmp_ugt_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vmsltu.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_vx_swap_nxv8i64:
@@ -3398,6 +3426,7 @@ define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vmsleu.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_vx_nxv8i64:
@@ -3428,6 +3457,7 @@ define <vscale x 8 x i1> @icmp_uge_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vmsleu.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_vx_swap_nxv8i64:
@@ -3489,6 +3519,7 @@ define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vmsltu.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_vx_nxv8i64:
@@ -3517,6 +3548,7 @@ define <vscale x 8 x i1> @icmp_ult_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vmsltu.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_vx_swap_nxv8i64:
@@ -3578,6 +3610,7 @@ define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vmslt.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_vx_nxv8i64:
@@ -3606,6 +3639,7 @@ define <vscale x 8 x i1> @icmp_sgt_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vmslt.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_vx_swap_nxv8i64:
@@ -3667,6 +3701,7 @@ define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vmsle.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_vx_nxv8i64:
@@ -3697,6 +3732,7 @@ define <vscale x 8 x i1> @icmp_sge_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vmsle.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_vx_swap_nxv8i64:
@@ -3758,6 +3794,7 @@ define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vmslt.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_vx_nxv8i64:
@@ -3786,6 +3823,7 @@ define <vscale x 8 x i1> @icmp_slt_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vmslt.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_vx_swap_nxv8i64:
@@ -3847,6 +3885,7 @@ define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
 ; RV32-NEXT:    vmsle.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_vx_nxv8i64:
@@ -3875,6 +3914,7 @@ define <vscale x 8 x i1> @icmp_sle_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vmsle.vv v16, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_vx_swap_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
index 90ffeff9689e07..c21dd6d4e50b29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
@@ -2062,6 +2062,7 @@ define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmseq.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_vx_nxv8i64:
@@ -2087,6 +2088,7 @@ define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmseq.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_eq_xv_nxv8i64:
@@ -2152,6 +2154,7 @@ define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsne.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_vx_nxv8i64:
@@ -2177,6 +2180,7 @@ define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsne.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ne_xv_nxv8i64:
@@ -2222,6 +2226,7 @@ define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsltu.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_vx_nxv8i64:
@@ -2247,6 +2252,7 @@ define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsltu.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ugt_xv_nxv8i64:
@@ -2292,6 +2298,7 @@ define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsleu.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_vx_nxv8i64:
@@ -2318,6 +2325,7 @@ define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsleu.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_uge_xv_nxv8i64:
@@ -2424,6 +2432,7 @@ define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsltu.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_vx_nxv8i64:
@@ -2449,6 +2458,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsltu.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ult_xv_nxv8i64:
@@ -2545,6 +2555,7 @@ define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsleu.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ule_vx_nxv8i64:
@@ -2570,6 +2581,7 @@ define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsleu.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_ule_xv_nxv8i64:
@@ -2616,6 +2628,7 @@ define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmslt.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_vx_nxv8i64:
@@ -2641,6 +2654,7 @@ define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmslt.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sgt_xv_nxv8i64:
@@ -2686,6 +2700,7 @@ define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsle.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_vx_nxv8i64:
@@ -2712,6 +2727,7 @@ define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsle.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sge_xv_nxv8i64:
@@ -2798,6 +2814,7 @@ define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmslt.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_vx_nxv8i64:
@@ -2823,6 +2840,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmslt.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_slt_xv_nxv8i64:
@@ -2909,6 +2927,7 @@ define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsle.vv v0, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_vx_nxv8i64:
@@ -2934,6 +2953,7 @@ define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmsle.vv v0, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: icmp_sle_xv_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
index 28583efccdbca4..27062ac6bd33e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
@@ -79,6 +79,7 @@ define i32 @splat_vector_split_i64() {
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
     %1 = insertelement <vscale x 2 x i64> zeroinitializer, i64 3, i64 3
     %2 = tail call <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64> %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
index 70fcabe59889fb..8b36c3634f6277 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
@@ -31,7 +31,9 @@ define i64 @i64(<vscale x 1 x i64> %v, i1 %c) {
 ; RV32-NEXT:  .LBB0_3: # %falsebb
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    add sp, sp, a2
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: i64:
@@ -52,7 +54,9 @@ define i64 @i64(<vscale x 1 x i64> %v, i1 %c) {
 ; RV64-NEXT:  .LBB0_2: # %falsebb
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    add sp, sp, a1
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
@@ -82,7 +86,9 @@ define i32 @i32(<vscale x 2 x i32> %v, i1 %c) {
 ; CHECK-NEXT:  .LBB1_2: # %falsebb
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    add sp, sp, a1
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
@@ -112,7 +118,9 @@ define i16 @i16(<vscale x 4 x i16> %v, i1 %c) {
 ; CHECK-NEXT:  .LBB2_2: # %falsebb
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    add sp, sp, a1
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
@@ -142,7 +150,9 @@ define i8 @i8(<vscale x 8 x i8> %v, i1 %c) {
 ; CHECK-NEXT:  .LBB3_2: # %falsebb
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    add sp, sp, a1
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
@@ -175,7 +185,9 @@ define double @f64(<vscale x 1 x double> %v, i1 %c) {
 ; RV32-NEXT:  .LBB4_3: # %falsebb
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: f64:
@@ -199,7 +211,9 @@ define double @f64(<vscale x 1 x double> %v, i1 %c) {
 ; RV64-NEXT:  .LBB4_3: # %falsebb
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
@@ -232,7 +246,9 @@ define float @f32(<vscale x 2 x float> %v, i1 %c) {
 ; CHECK-NEXT:  .LBB5_3: # %falsebb
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
@@ -265,7 +281,9 @@ define half @f16(<vscale x 1 x half> %v, i1 %c) {
 ; ZFMIN-NEXT:  .LBB6_3: # %falsebb
 ; ZFMIN-NEXT:    csrr a0, vlenb
 ; ZFMIN-NEXT:    add sp, sp, a0
+; ZFMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZFMIN-NEXT:    addi sp, sp, 16
+; ZFMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZFMIN-NEXT:    ret
 ;
 ; NOZFMIN-LABEL: f16:
@@ -292,7 +310,9 @@ define half @f16(<vscale x 1 x half> %v, i1 %c) {
 ; NOZFMIN-NEXT:    fmv.w.x fa0, a0
 ; NOZFMIN-NEXT:    csrr a0, vlenb
 ; NOZFMIN-NEXT:    add sp, sp, a0
+; NOZFMIN-NEXT:    .cfi_def_cfa sp, 16
 ; NOZFMIN-NEXT:    addi sp, sp, 16
+; NOZFMIN-NEXT:    .cfi_def_cfa_offset 0
 ; NOZFMIN-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
@@ -326,7 +346,9 @@ define bfloat @bf16(<vscale x 2 x bfloat> %v, i1 %c) {
 ; ZFMIN-NEXT:  .LBB7_3: # %falsebb
 ; ZFMIN-NEXT:    csrr a0, vlenb
 ; ZFMIN-NEXT:    add sp, sp, a0
+; ZFMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZFMIN-NEXT:    addi sp, sp, 16
+; ZFMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZFMIN-NEXT:    ret
 ;
 ; NOZFMIN-LABEL: bf16:
@@ -353,7 +375,9 @@ define bfloat @bf16(<vscale x 2 x bfloat> %v, i1 %c) {
 ; NOZFMIN-NEXT:    fmv.w.x fa0, a0
 ; NOZFMIN-NEXT:    csrr a0, vlenb
 ; NOZFMIN-NEXT:    add sp, sp, a0
+; NOZFMIN-NEXT:    .cfi_def_cfa sp, 16
 ; NOZFMIN-NEXT:    addi sp, sp, 16
+; NOZFMIN-NEXT:    .cfi_def_cfa_offset 0
 ; NOZFMIN-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index 721f03120bd499..2596d8c1069ce7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -493,6 +493,7 @@ define <vscale x 8 x i64> @mul_bigimm_stepvector_nxv8i64() {
 ; RV32-NEXT:    vid.v v16
 ; RV32-NEXT:    vmul.vv v8, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mul_bigimm_stepvector_nxv8i64:
@@ -540,6 +541,7 @@ define <vscale x 16 x i64> @stepvector_nxv16i64() {
 ; RV32-NEXT:    vid.v v8
 ; RV32-NEXT:    vadd.vv v16, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: stepvector_nxv16i64:
@@ -569,6 +571,7 @@ define <vscale x 16 x i64> @add_stepvector_nxv16i64() {
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vadd.vv v16, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: add_stepvector_nxv16i64:
@@ -605,6 +608,7 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vadd.vv v16, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mul_stepvector_nxv16i64:
@@ -657,6 +661,7 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
 ; RV32-NEXT:    vmul.vv v8, v24, v8
 ; RV32-NEXT:    vadd.vv v16, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mul_bigimm_stepvector_nxv16i64:
@@ -694,6 +699,7 @@ define <vscale x 16 x i64> @shl_stepvector_nxv16i64() {
 ; RV32-NEXT:    vsll.vi v8, v8, 2
 ; RV32-NEXT:    vadd.vv v16, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: shl_stepvector_nxv16i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index abdf9ab09bb9ae..50e9ef28dd2c03 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -665,7 +665,9 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.nxv17f64.p0.i32(<vscale x 17 x double> %v, ptr %ptr, i32 %stride, <vscale x 17 x i1> %mask, i32 %evl)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
index 6f5bb3a6643991..c81e678900ab04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -23,6 +23,7 @@ define dso_local signext i32 @undef_early_clobber_chain() {
 ; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    addi sp, sp, 400
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %dst = alloca [100 x float], align 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
index cd9edca1d4c44f..696bd39fe2979b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
@@ -171,6 +171,7 @@ define <vscale x 8 x i64> @vaaddu_vx_nxv8i64_floor(<vscale x 8 x i64> %x, i64 %y
 ; RV32-NEXT:    csrwi vxrm, 2
 ; RV32-NEXT:    vaaddu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vaaddu_vx_nxv8i64_floor:
@@ -390,6 +391,7 @@ define <vscale x 8 x i64> @vaaddu_vx_nxv8i64_ceil(<vscale x 8 x i64> %x, i64 %y)
 ; RV32-NEXT:    csrwi vxrm, 0
 ; RV32-NEXT:    vaaddu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vaaddu_vx_nxv8i64_ceil:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
index 2b141097366cfb..4cba8af2771ff4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
@@ -601,6 +601,7 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vadd.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv1i64:
@@ -646,6 +647,7 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vadd.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv2i64:
@@ -691,6 +693,7 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vadd.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv4i64:
@@ -736,6 +739,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index feeef73e538ae0..170cfed14b7ac7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -1492,6 +1492,7 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv1i64:
@@ -1518,6 +1519,7 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv1i64_unmasked:
@@ -1586,6 +1588,7 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv2i64:
@@ -1612,6 +1615,7 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv2i64_unmasked:
@@ -1680,6 +1684,7 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv4i64:
@@ -1706,6 +1711,7 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv4i64_unmasked:
@@ -1774,6 +1780,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv8i64:
@@ -1800,6 +1807,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vadd_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
index a84e2c984f669c..d41d44838a91cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
@@ -978,6 +978,7 @@ define <vscale x 1 x i64> @vand_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv1i64:
@@ -1044,6 +1045,7 @@ define <vscale x 2 x i64> @vand_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv2i64:
@@ -1110,6 +1112,7 @@ define <vscale x 4 x i64> @vand_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv4i64:
@@ -1176,6 +1179,7 @@ define <vscale x 8 x i64> @vand_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
index 7b4a68d5867f99..5ac3283efb25d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
@@ -1319,6 +1319,7 @@ define <vscale x 1 x i64> @vand_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv1i64:
@@ -1345,6 +1346,7 @@ define <vscale x 1 x i64> @vand_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv1i64_unmasked:
@@ -1413,6 +1415,7 @@ define <vscale x 2 x i64> @vand_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv2i64:
@@ -1439,6 +1442,7 @@ define <vscale x 2 x i64> @vand_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv2i64_unmasked:
@@ -1507,6 +1511,7 @@ define <vscale x 4 x i64> @vand_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv4i64:
@@ -1533,6 +1538,7 @@ define <vscale x 4 x i64> @vand_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv4i64_unmasked:
@@ -1601,6 +1607,7 @@ define <vscale x 8 x i64> @vand_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv8i64:
@@ -1627,6 +1634,7 @@ define <vscale x 8 x i64> @vand_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vand_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index f25a3f937f1b4f..afd452f09e0f8d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -1422,6 +1422,7 @@ define <vscale x 1 x i64> @vandn_vx_nxv1i64(i64 %x, <vscale x 1 x i64> %y) {
 ; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v9, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_nxv1i64:
@@ -1444,6 +1445,7 @@ define <vscale x 1 x i64> @vandn_vx_nxv1i64(i64 %x, <vscale x 1 x i64> %y) {
 ; CHECK-ZVKB32-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v9, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_nxv1i64:
@@ -1472,6 +1474,7 @@ define <vscale x 1 x i64> @vandn_vx_swapped_nxv1i64(i64 %x, <vscale x 1 x i64> %
 ; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v9, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_swapped_nxv1i64:
@@ -1494,6 +1497,7 @@ define <vscale x 1 x i64> @vandn_vx_swapped_nxv1i64(i64 %x, <vscale x 1 x i64> %
 ; CHECK-ZVKB32-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v9, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv1i64:
@@ -1558,6 +1562,7 @@ define <vscale x 2 x i64> @vandn_vx_nxv2i64(i64 %x, <vscale x 2 x i64> %y) {
 ; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v10, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_nxv2i64:
@@ -1580,6 +1585,7 @@ define <vscale x 2 x i64> @vandn_vx_nxv2i64(i64 %x, <vscale x 2 x i64> %y) {
 ; CHECK-ZVKB32-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v10, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_nxv2i64:
@@ -1608,6 +1614,7 @@ define <vscale x 2 x i64> @vandn_vx_swapped_nxv2i64(i64 %x, <vscale x 2 x i64> %
 ; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v10, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_swapped_nxv2i64:
@@ -1630,6 +1637,7 @@ define <vscale x 2 x i64> @vandn_vx_swapped_nxv2i64(i64 %x, <vscale x 2 x i64> %
 ; CHECK-ZVKB32-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v10, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv2i64:
@@ -1694,6 +1702,7 @@ define <vscale x 4 x i64> @vandn_vx_nxv4i64(i64 %x, <vscale x 4 x i64> %y) {
 ; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v12, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_nxv4i64:
@@ -1716,6 +1725,7 @@ define <vscale x 4 x i64> @vandn_vx_nxv4i64(i64 %x, <vscale x 4 x i64> %y) {
 ; CHECK-ZVKB32-NEXT:    vlse64.v v12, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v12, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_nxv4i64:
@@ -1744,6 +1754,7 @@ define <vscale x 4 x i64> @vandn_vx_swapped_nxv4i64(i64 %x, <vscale x 4 x i64> %
 ; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v12, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_swapped_nxv4i64:
@@ -1766,6 +1777,7 @@ define <vscale x 4 x i64> @vandn_vx_swapped_nxv4i64(i64 %x, <vscale x 4 x i64> %
 ; CHECK-ZVKB32-NEXT:    vlse64.v v12, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v12, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv4i64:
@@ -1830,6 +1842,7 @@ define <vscale x 8 x i64> @vandn_vx_nxv8i64(i64 %x, <vscale x 8 x i64> %y) {
 ; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v16, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_nxv8i64:
@@ -1852,6 +1865,7 @@ define <vscale x 8 x i64> @vandn_vx_nxv8i64(i64 %x, <vscale x 8 x i64> %y) {
 ; CHECK-ZVKB32-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v16, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_nxv8i64:
@@ -1880,6 +1894,7 @@ define <vscale x 8 x i64> @vandn_vx_swapped_nxv8i64(i64 %x, <vscale x 8 x i64> %
 ; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-RV32-NEXT:    vand.vv v8, v16, v8
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_swapped_nxv8i64:
@@ -1902,6 +1917,7 @@ define <vscale x 8 x i64> @vandn_vx_swapped_nxv8i64(i64 %x, <vscale x 8 x i64> %
 ; CHECK-ZVKB32-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v16, v8
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
index 95866543828fc7..0be0dcecea1e59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
@@ -1120,6 +1120,7 @@ define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <v
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-RV32-NEXT:    vand.vv v8, v8, v9, v0.t
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_vp_nxv1i64:
@@ -1143,6 +1144,7 @@ define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <v
 ; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v9, v0.t
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv1i64:
@@ -1211,6 +1213,7 @@ define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <v
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-RV32-NEXT:    vand.vv v8, v8, v10, v0.t
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_vp_nxv2i64:
@@ -1234,6 +1237,7 @@ define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <v
 ; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v10, v0.t
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv2i64:
@@ -1302,6 +1306,7 @@ define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <v
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-RV32-NEXT:    vand.vv v8, v8, v12, v0.t
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_vp_nxv4i64:
@@ -1325,6 +1330,7 @@ define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <v
 ; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v12, v0.t
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv4i64:
@@ -1393,6 +1399,7 @@ define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <v
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-RV32-NEXT:    vand.vv v8, v8, v16, v0.t
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vandn_vx_vp_nxv8i64:
@@ -1416,6 +1423,7 @@ define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <v
 ; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v16, v0.t
 ; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-ZVKB32-NEXT:    ret
 ;
 ; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
index ef9b2104b2d2d2..b2c03a88b71301 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
@@ -777,6 +777,7 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vdiv.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv1i64:
@@ -810,6 +811,7 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-V-NEXT:    vsra.vi v8, v8, 1
 ; RV32-V-NEXT:    vadd.vv v8, v8, v9
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdiv_vi_nxv1i64_0:
@@ -856,6 +858,7 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vdiv.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv2i64:
@@ -889,6 +892,7 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-V-NEXT:    vsra.vi v8, v8, 1
 ; RV32-V-NEXT:    vadd.vv v8, v8, v10
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdiv_vi_nxv2i64_0:
@@ -935,6 +939,7 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vdiv.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv4i64:
@@ -968,6 +973,7 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-V-NEXT:    vsra.vi v8, v8, 1
 ; RV32-V-NEXT:    vadd.vv v8, v8, v12
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdiv_vi_nxv4i64_0:
@@ -1014,6 +1020,7 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vdiv.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv8i64:
@@ -1047,6 +1054,7 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-V-NEXT:    vsra.vi v8, v8, 1
 ; RV32-V-NEXT:    vadd.vv v8, v8, v16
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdiv_vi_nxv8i64_0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
index a4b7ca7f39768f..49d563d2f4aa29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -900,6 +900,7 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv1i64:
@@ -926,6 +927,7 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv1i64_unmasked:
@@ -974,6 +976,7 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv2i64:
@@ -1000,6 +1003,7 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv2i64_unmasked:
@@ -1048,6 +1052,7 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv4i64:
@@ -1074,6 +1079,7 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv4i64_unmasked:
@@ -1122,6 +1128,7 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv8i64:
@@ -1148,6 +1155,7 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdiv.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
index 4f2fb937ca73f4..308b6031c431cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
@@ -669,6 +669,7 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vdivu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv1i64:
@@ -698,6 +699,7 @@ define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-V-NEXT:    li a0, 61
 ; RV32-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdivu_vi_nxv1i64_0:
@@ -766,6 +768,7 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vdivu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv2i64:
@@ -795,6 +798,7 @@ define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-V-NEXT:    li a0, 61
 ; RV32-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdivu_vi_nxv2i64_0:
@@ -863,6 +867,7 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vdivu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv4i64:
@@ -892,6 +897,7 @@ define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-V-NEXT:    li a0, 61
 ; RV32-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdivu_vi_nxv4i64_0:
@@ -960,6 +966,7 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vdivu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv8i64:
@@ -989,6 +996,7 @@ define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-V-NEXT:    li a0, 61
 ; RV32-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vdivu_vi_nxv8i64_0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
index 67c3f9dbf2869a..cfa4359245d027 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
@@ -899,6 +899,7 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv1i64:
@@ -925,6 +926,7 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv1i64_unmasked:
@@ -973,6 +975,7 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv2i64:
@@ -999,6 +1002,7 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv2i64_unmasked:
@@ -1047,6 +1051,7 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv4i64:
@@ -1073,6 +1078,7 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv4i64_unmasked:
@@ -1121,6 +1127,7 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv8i64:
@@ -1147,6 +1154,7 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vdivu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 54373d94f8f5f3..050042ce681522 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -180,7 +180,9 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i6
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %vec = load <vscale x 16 x i64>, ptr %p
   %retval = call {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv16i64(<vscale x 16 x i64> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 28f7eb4329e3b9..a4beef27367dda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -124,7 +124,9 @@ define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 %retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
 ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
@@ -243,7 +245,9 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 %retval = call {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv16i64(<vscale x 16 x i64> %vec)
 ret {<vscale x 8 x i64>, <vscale x 8 x i64>} %retval
@@ -497,7 +501,9 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 %retval = call {<vscale x 8 x double>, <vscale x 8 x double>} @llvm.vector.deinterleave2.nxv16f64(<vscale x 16 x double> %vec)
 ret {<vscale x 8 x double>, <vscale x 8 x double>} %retval
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index a06aa2d02b11b5..13399c6f94c22a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -136,7 +136,9 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
   store <vscale x 16 x i64> %res, ptr %p
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 83c235d8e87ab7..779dd6cd74f508 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -317,7 +317,9 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ;
 ; ZVBB-LABEL: vector_interleave_nxv16i64_nxv8i64:
@@ -352,7 +354,9 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    slli a0, a0, 3
 ; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    .cfi_def_cfa sp, 16
 ; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    .cfi_def_cfa_offset 0
 ; ZVBB-NEXT:    ret
   %res = call <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
   ret <vscale x 16 x i64> %res
@@ -721,7 +725,9 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ;
 ; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
@@ -756,7 +762,9 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    slli a0, a0, 3
 ; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    .cfi_def_cfa sp, 16
 ; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    .cfi_def_cfa_offset 0
 ; ZVBB-NEXT:    ret
   %res = call <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
   ret <vscale x 16 x double> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 4c298ab2b5e6db..fd70c75b306c94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -446,7 +446,9 @@ define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -496,7 +498,9 @@ define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -578,7 +582,9 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
 ; CHECK-NEXT:    slli a1, a0, 4
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -644,7 +650,9 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -1223,7 +1231,9 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1279,7 +1289,9 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1367,7 +1379,9 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 ; ZVFHMIN-NEXT:    slli a1, a0, 4
 ; ZVFHMIN-NEXT:    add a0, a1, a0
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -1439,7 +1453,9 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index 0fe6c5dec42649..187310c8b7f5e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -408,7 +408,9 @@ define <vscale x 32 x bfloat> @vfdiv_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fdiv.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -458,7 +460,9 @@ define <vscale x 32 x bfloat> @vfdiv_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fdiv.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -540,7 +544,9 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
 ; CHECK-NEXT:    slli a1, a0, 4
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -606,7 +612,9 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -1135,7 +1143,9 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1191,7 +1201,9 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1279,7 +1291,9 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 ; ZVFHMIN-NEXT:    slli a1, a0, 4
 ; ZVFHMIN-NEXT:    add a0, a1, a0
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -1351,7 +1365,9 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index f0c74d064016a8..3871ea1fa2d6a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -493,7 +493,9 @@ define <vscale x 16 x bfloat> @vfma_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.fma.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x bfloat> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -540,7 +542,9 @@ define <vscale x 16 x bfloat> @vfma_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfl
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
@@ -593,7 +597,9 @@ define <vscale x 16 x bfloat> @vfma_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
@@ -626,7 +632,9 @@ define <vscale x 16 x bfloat> @vfma_vf_nxv16bf16_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
@@ -781,7 +789,9 @@ define <vscale x 32 x bfloat> @vfma_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fma.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x bfloat> %c, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -883,7 +893,9 @@ define <vscale x 32 x bfloat> @vfma_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fma.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x bfloat> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -1041,7 +1053,9 @@ define <vscale x 32 x bfloat> @vfma_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfl
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -1201,7 +1215,9 @@ define <vscale x 32 x bfloat> @vfma_vf_nxv32bf16_commute(<vscale x 32 x bfloat>
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -1306,7 +1322,9 @@ define <vscale x 32 x bfloat> @vfma_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -1410,7 +1428,9 @@ define <vscale x 32 x bfloat> @vfma_vf_nxv32bf16_unmasked_commute(<vscale x 32 x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -2054,7 +2074,9 @@ define <vscale x 16 x half> @vfma_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -2113,7 +2135,9 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16(<vscale x 16 x half> %va, half %b,
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -2178,7 +2202,9 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -2217,7 +2243,9 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked_commute(<vscale x 16 x ha
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -2380,7 +2408,9 @@ define <vscale x 32 x half> @vfma_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -2489,7 +2519,9 @@ define <vscale x 32 x half> @vfma_vv_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -2653,7 +2685,9 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16(<vscale x 32 x half> %va, half %b,
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -2819,7 +2853,9 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_commute(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -2930,7 +2966,9 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -3040,7 +3078,9 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked_commute(<vscale x 32 x ha
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -3828,7 +3868,9 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.fma.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -3897,7 +3939,9 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.fma.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
@@ -7727,7 +7771,9 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -7772,7 +7818,9 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -7844,7 +7892,9 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_commuted(<vscale x 16 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
   %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
@@ -7944,7 +7994,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16(<vscale x 16 x half> %va, half
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -8024,7 +8076,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -8071,7 +8125,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -8118,7 +8174,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -8291,7 +8349,9 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_commuted(<vscale x 16 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
   %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
@@ -8454,7 +8514,9 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -8499,7 +8561,9 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -8795,7 +8859,9 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
   %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
@@ -8923,7 +8989,9 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -9053,7 +9121,9 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, half %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -9185,7 +9255,9 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_commute(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -9307,7 +9379,9 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -9431,7 +9505,9 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -9554,7 +9630,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
@@ -9704,7 +9782,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_commuted(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
@@ -9826,7 +9906,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -9948,7 +10030,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -10099,7 +10183,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16(<vscale x 32 x half> %va, half
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -10233,7 +10319,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_commute(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -10358,7 +10446,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -10484,7 +10574,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -10612,7 +10704,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -10746,7 +10840,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -10872,7 +10968,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -10997,7 +11095,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -11121,7 +11221,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
@@ -11271,7 +11373,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
@@ -11393,7 +11497,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -11515,7 +11621,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -11649,7 +11757,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -11776,7 +11886,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -11899,7 +12011,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -12021,7 +12135,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -12148,7 +12264,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -12298,7 +12416,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_commute(<vscale x 32
 ; ZVFHMIN-NEXT:    slli a1, a0, 5
 ; ZVFHMIN-NEXT:    add a0, a1, a0
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -12421,7 +12541,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -12544,7 +12666,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index dea411348ce542..9e9a71924888a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -189,7 +189,9 @@ define <vscale x 16 x bfloat> @vfmadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %vd = call <vscale x 16 x bfloat> @llvm.experimental.constrained.fma.nxv16bf16(<vscale x 16 x bfloat> %vc, <vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret <vscale x 16 x bfloat> %vd
@@ -220,7 +222,9 @@ define <vscale x 16 x bfloat> @vfmadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x bfloat> poison, bfloat %c, i32 0
   %splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
@@ -318,7 +322,9 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %vd = call <vscale x 32 x bfloat> @llvm.experimental.constrained.fma.nxv32bf16(<vscale x 32 x bfloat> %vc, <vscale x 32 x bfloat> %vb, <vscale x 32 x bfloat> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret <vscale x 32 x bfloat> %vd
@@ -389,7 +395,9 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x bfloat> poison, bfloat %c, i32 0
   %splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -629,7 +637,9 @@ define <vscale x 16 x half> @vfmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %vd = call <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret <vscale x 16 x half> %vd
@@ -666,7 +676,9 @@ define <vscale x 16 x half> @vfmadd_vf_nxv16f16(<vscale x 16 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 16 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -772,7 +784,9 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x half> %vb, <vscale x 32 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret <vscale x 32 x half> %vd
@@ -849,7 +863,9 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    li a1, 24
 ; ZVFHMIN-NEXT:    mul a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 32 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 2df2212c43db09..117a4a54007f81 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -220,7 +220,9 @@ define <vscale x 16 x bfloat> @vfmadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x bfloat> poison, bfloat %c, i32 0
   %splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
@@ -327,7 +329,9 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
 ; ZVFH-NEXT:    csrr a0, vlenb
 ; ZVFH-NEXT:    slli a0, a0, 5
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmadd_vv_nxv32bf16:
@@ -418,7 +422,9 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %vd = call <vscale x 32 x bfloat> @llvm.fma.v32bf16(<vscale x 32 x bfloat> %vc, <vscale x 32 x bfloat> %vb, <vscale x 32 x bfloat> %va)
   ret <vscale x 32 x bfloat> %vd
@@ -493,7 +499,9 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
 ; ZVFH-NEXT:    slli a0, a0, 1
 ; ZVFH-NEXT:    add a0, a0, a1
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmadd_vf_nxv32bf16:
@@ -560,7 +568,9 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
 ; ZVFHMIN-NEXT:    li a1, 24
 ; ZVFHMIN-NEXT:    mul a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 32 x bfloat> poison, bfloat %c, i32 0
   %splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -845,7 +855,9 @@ define <vscale x 16 x half> @vfmadd_vf_nxv16f16(<vscale x 16 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 16 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -951,7 +963,9 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %vd = call <vscale x 32 x half> @llvm.fma.v32f16(<vscale x 32 x half> %vc, <vscale x 32 x half> %vb, <vscale x 32 x half> %va)
   ret <vscale x 32 x half> %vd
@@ -1028,7 +1042,9 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    li a1, 24
 ; ZVFHMIN-NEXT:    mul a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 32 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 6e38881b4d60fb..3c627974da49e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -218,7 +218,9 @@ define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.maxnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -268,7 +270,9 @@ define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.maxnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -545,7 +549,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -601,7 +607,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index f1d6b2100ae980..675ff12dfa42b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -218,7 +218,9 @@ define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.minnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -268,7 +270,9 @@ define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.minnum.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -545,7 +549,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -601,7 +607,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
index 7ec241bf74247a..51a49b7842b6cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
@@ -269,7 +269,9 @@ define <vscale x 16 x half> @vfmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %neg = fneg <vscale x 16 x half> %vb
   %vd = call <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x half> %va, <vscale x 16 x half> %neg, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -310,7 +312,9 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 2
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 16 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
@@ -396,7 +400,9 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    li a1, 24
 ; ZVFHMIN-NEXT:    mul a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %neg = fneg <vscale x 32 x half> %va
   %vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x half> %vb, <vscale x 32 x half> %neg, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -476,7 +482,9 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
 ; ZVFHMIN-NEXT:    li a1, 24
 ; ZVFHMIN-NEXT:    mul a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 32 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 3114fb5d3bfa3e..8ce38cf6ceaab9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -530,7 +530,9 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -586,7 +588,9 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -674,7 +678,9 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 ; ZVFHMIN-NEXT:    slli a1, a0, 4
 ; ZVFHMIN-NEXT:    add a0, a1, a0
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -746,7 +752,9 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index abda6750e5a8a5..2c94419329a420 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1200,7 +1200,9 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
 ; CHECK-NEXT:    li a1, 40
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.fmuladd.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -1265,7 +1267,9 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    li a1, 24
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.fmuladd.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index 5ec089a2dcac86..13e81c8f4ba683 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -400,7 +400,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %neg = fneg <vscale x 32 x half> %vc
   %neg2 = fneg <vscale x 32 x half> %vb
@@ -497,7 +499,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 32 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index 286492bce2960c..b4d6a21276abe3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -380,7 +380,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %neg = fneg <vscale x 32 x half> %vc
   %vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %neg, <vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -473,7 +475,9 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 32 x half> poison, half %c, i32 0
   %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index f42b603509c22a..7dc0b637dfb647 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -541,7 +541,9 @@ define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index 403bc595b9bbd1..4e1a9a778bf8e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -541,7 +541,9 @@ define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index da16feeddecd71..93887166a13b8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -128,7 +128,9 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 %vl)
   ret <vscale x 16 x float> %v
@@ -213,7 +215,9 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 %vl)
   ret <vscale x 32 x float> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index dd57b65b50f4f1..601db797240104 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -408,7 +408,9 @@ define <vscale x 32 x bfloat> @vfsub_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fsub.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -458,7 +460,9 @@ define <vscale x 32 x bfloat> @vfsub_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.fsub.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -540,7 +544,9 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
 ; CHECK-NEXT:    slli a1, a0, 4
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -606,7 +612,9 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
@@ -1135,7 +1143,9 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1191,7 +1201,9 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -1279,7 +1291,9 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 ; ZVFHMIN-NEXT:    slli a1, a0, 4
 ; ZVFHMIN-NEXT:    add a0, a1, a0
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
@@ -1351,7 +1365,9 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
index 80ada4670562d7..6cd3884f029fdd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
@@ -652,7 +652,9 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vscal
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
   %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
index 6ea58a4e768736..0a0bc6696a9f96 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
@@ -609,7 +609,9 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
   %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
index 0afbe58038c76f..b5f7ef3380869f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
@@ -585,7 +585,9 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
   %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
   %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
index e232ac255c56f0..259bbc1b469022 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
@@ -1660,6 +1660,7 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vmacc.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv1i64:
@@ -1690,6 +1691,7 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
 ; RV32-NEXT:    vmacc.vv v9, v8, v10
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv1i64_unmasked:
@@ -1733,6 +1735,7 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmacc.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv1i64_ta:
@@ -1794,6 +1797,7 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vmacc.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv2i64:
@@ -1824,6 +1828,7 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
 ; RV32-NEXT:    vmacc.vv v10, v8, v12
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv2i64_unmasked:
@@ -1867,6 +1872,7 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmacc.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv2i64_ta:
@@ -1928,6 +1934,7 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vmacc.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv4i64:
@@ -1958,6 +1965,7 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
 ; RV32-NEXT:    vmacc.vv v12, v8, v16
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv4i64_unmasked:
@@ -2001,6 +2009,7 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmacc.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv4i64_ta:
@@ -2064,6 +2073,7 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vmacc.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv8i64:
@@ -2094,6 +2104,7 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
 ; RV32-NEXT:    vmacc.vv v16, v8, v24
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv8i64_unmasked:
@@ -2138,6 +2149,7 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmacc.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmacc_vx_nxv8i64_ta:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
index e1988c058fac34..20ff2081cf7605 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
@@ -465,6 +465,7 @@ define <vscale x 1 x i64> @vmadd_vx_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmadd.vv v8, v10, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv1i64:
@@ -502,6 +503,7 @@ define <vscale x 2 x i64> @vmadd_vx_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmacc.vv v8, v10, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv2i64:
@@ -539,6 +541,7 @@ define <vscale x 4 x i64> @vmadd_vx_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmadd.vv v8, v16, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv4i64:
@@ -577,6 +580,7 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; RV32-NEXT:    vlse64.v v24, (a0), zero
 ; RV32-NEXT:    vmacc.vv v8, v16, v24
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll
index 5401bf7db49e38..79ab69bf43fee8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll
@@ -1645,6 +1645,7 @@ define <vscale x 1 x i64> @vmadd_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv1i64:
@@ -1675,6 +1676,7 @@ define <vscale x 1 x i64> @vmadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv1i64_unmasked:
@@ -1717,6 +1719,7 @@ define <vscale x 1 x i64> @vmadd_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmadd.vv v10, v8, v9
 ; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv1i64_ta:
@@ -1780,6 +1783,7 @@ define <vscale x 2 x i64> @vmadd_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv2i64:
@@ -1810,6 +1814,7 @@ define <vscale x 2 x i64> @vmadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
 ; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv2i64_unmasked:
@@ -1852,6 +1857,7 @@ define <vscale x 2 x i64> @vmadd_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmadd.vv v12, v8, v10
 ; RV32-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv2i64_ta:
@@ -1915,6 +1921,7 @@ define <vscale x 4 x i64> @vmadd_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv4i64:
@@ -1945,6 +1952,7 @@ define <vscale x 4 x i64> @vmadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
 ; RV32-NEXT:    vmv.v.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv4i64_unmasked:
@@ -1987,6 +1995,7 @@ define <vscale x 4 x i64> @vmadd_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmadd.vv v16, v8, v12
 ; RV32-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv4i64_ta:
@@ -2052,6 +2061,7 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
 ; RV32-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv8i64:
@@ -2082,6 +2092,7 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
 ; RV32-NEXT:    vmv.v.v v8, v24
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv8i64_unmasked:
@@ -2125,6 +2136,7 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <v
 ; RV32-NEXT:    vmadd.vv v24, v8, v16
 ; RV32-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmadd_vx_nxv8i64_ta:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
index 52720755dd5b5b..ef5339940bb7ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
@@ -673,6 +673,7 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmax.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv1i64:
@@ -722,6 +723,7 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmax.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv2i64:
@@ -771,6 +773,7 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmax.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv4i64:
@@ -820,6 +823,7 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmax.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index f65e708f5303cc..2ab7130cac9307 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -1131,6 +1131,7 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv1i64:
@@ -1157,6 +1158,7 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv1i64_unmasked:
@@ -1205,6 +1207,7 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv2i64:
@@ -1231,6 +1234,7 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv2i64_unmasked:
@@ -1279,6 +1283,7 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv4i64:
@@ -1305,6 +1310,7 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv4i64_unmasked:
@@ -1353,6 +1359,7 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv8i64:
@@ -1379,6 +1386,7 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
index 8eb70fbc91fa54..4ccfa19a0c1172 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
@@ -673,6 +673,7 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmaxu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv1i64:
@@ -722,6 +723,7 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmaxu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv2i64:
@@ -771,6 +773,7 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmaxu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv4i64:
@@ -820,6 +823,7 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmaxu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmax_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index df1ad58e5ecbde..958eab4c72e092 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -1130,6 +1130,7 @@ define <vscale x 1 x i64> @vmaxu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv1i64:
@@ -1156,6 +1157,7 @@ define <vscale x 1 x i64> @vmaxu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv1i64_unmasked:
@@ -1204,6 +1206,7 @@ define <vscale x 2 x i64> @vmaxu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv2i64:
@@ -1230,6 +1233,7 @@ define <vscale x 2 x i64> @vmaxu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv2i64_unmasked:
@@ -1278,6 +1282,7 @@ define <vscale x 4 x i64> @vmaxu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv4i64:
@@ -1304,6 +1309,7 @@ define <vscale x 4 x i64> @vmaxu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv4i64_unmasked:
@@ -1352,6 +1358,7 @@ define <vscale x 8 x i64> @vmaxu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv8i64:
@@ -1378,6 +1385,7 @@ define <vscale x 8 x i64> @vmaxu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmaxu_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
index 7f526a21deac1c..a8665d27b4ce2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
@@ -673,6 +673,7 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmin.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv1i64:
@@ -722,6 +723,7 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmin.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv2i64:
@@ -771,6 +773,7 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmin.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv4i64:
@@ -820,6 +823,7 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmin.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 0bf0638633aa45..5b86f7c4e004c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -1131,6 +1131,7 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv1i64:
@@ -1157,6 +1158,7 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv1i64_unmasked:
@@ -1205,6 +1207,7 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv2i64:
@@ -1231,6 +1234,7 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv2i64_unmasked:
@@ -1279,6 +1283,7 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv4i64:
@@ -1305,6 +1310,7 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv4i64_unmasked:
@@ -1353,6 +1359,7 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv8i64:
@@ -1379,6 +1386,7 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
index d22a7dcccf0adc..d35c39414cf228 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
@@ -673,6 +673,7 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vminu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv1i64:
@@ -722,6 +723,7 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vminu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv2i64:
@@ -771,6 +773,7 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vminu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv4i64:
@@ -820,6 +823,7 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vminu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmin_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 2acebdf2e646d4..9e71de7d472b7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -1130,6 +1130,7 @@ define <vscale x 1 x i64> @vminu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv1i64:
@@ -1156,6 +1157,7 @@ define <vscale x 1 x i64> @vminu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv1i64_unmasked:
@@ -1204,6 +1206,7 @@ define <vscale x 2 x i64> @vminu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv2i64:
@@ -1230,6 +1233,7 @@ define <vscale x 2 x i64> @vminu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv2i64_unmasked:
@@ -1278,6 +1282,7 @@ define <vscale x 4 x i64> @vminu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv4i64:
@@ -1304,6 +1309,7 @@ define <vscale x 4 x i64> @vminu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv4i64_unmasked:
@@ -1352,6 +1358,7 @@ define <vscale x 8 x i64> @vminu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv8i64:
@@ -1378,6 +1385,7 @@ define <vscale x 8 x i64> @vminu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vminu_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
index 0b8620c90c62e0..6d969e1284a871 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
@@ -621,6 +621,7 @@ define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmul.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv1i64:
@@ -687,6 +688,7 @@ define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmul.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv2i64:
@@ -753,6 +755,7 @@ define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmul.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv4i64:
@@ -819,6 +822,7 @@ define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmul.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
index 51026cbcb8c4bf..bf0a0409eef3ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
@@ -939,6 +939,7 @@ define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv1i64:
@@ -965,6 +966,7 @@ define <vscale x 1 x i64> @vmul_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv1i64_unmasked:
@@ -1013,6 +1015,7 @@ define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv2i64:
@@ -1039,6 +1042,7 @@ define <vscale x 2 x i64> @vmul_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv2i64_unmasked:
@@ -1087,6 +1091,7 @@ define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv4i64:
@@ -1113,6 +1118,7 @@ define <vscale x 4 x i64> @vmul_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv4i64_unmasked:
@@ -1161,6 +1167,7 @@ define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv8i64:
@@ -1187,6 +1194,7 @@ define <vscale x 8 x i64> @vmul_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vmul.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmul_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
index 30edcaf9b15b53..8eaa24ca815870 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
@@ -1660,6 +1660,7 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsc
 ; RV32-NEXT:    vnmsac.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv1i64:
@@ -1690,6 +1691,7 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
 ; RV32-NEXT:    vnmsac.vv v9, v8, v10
 ; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv1i64_unmasked:
@@ -1733,6 +1735,7 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <
 ; RV32-NEXT:    vnmsac.vv v9, v8, v10, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv1i64_ta:
@@ -1794,6 +1797,7 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsc
 ; RV32-NEXT:    vnmsac.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv2i64:
@@ -1824,6 +1828,7 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
 ; RV32-NEXT:    vnmsac.vv v10, v8, v12
 ; RV32-NEXT:    vmv2r.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv2i64_unmasked:
@@ -1867,6 +1872,7 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <
 ; RV32-NEXT:    vnmsac.vv v10, v8, v12, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv2i64_ta:
@@ -1928,6 +1934,7 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsc
 ; RV32-NEXT:    vnmsac.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv4i64:
@@ -1958,6 +1965,7 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
 ; RV32-NEXT:    vnmsac.vv v12, v8, v16
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv4i64_unmasked:
@@ -2001,6 +2009,7 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <
 ; RV32-NEXT:    vnmsac.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv4i64_ta:
@@ -2064,6 +2073,7 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsc
 ; RV32-NEXT:    vnmsac.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv8i64:
@@ -2094,6 +2104,7 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
 ; RV32-NEXT:    vnmsac.vv v16, v8, v24
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv8i64_unmasked:
@@ -2138,6 +2149,7 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <
 ; RV32-NEXT:    vnmsac.vv v16, v8, v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsac_vx_nxv8i64_ta:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll
index 186ffb64e59025..792bebd8490428 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll
@@ -465,6 +465,7 @@ define <vscale x 1 x i64> @vnmsub_vx_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vnmsub.vv v8, v10, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsub_vx_nxv1i64:
@@ -502,6 +503,7 @@ define <vscale x 2 x i64> @vnmsub_vx_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vnmsac.vv v8, v10, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsub_vx_nxv2i64:
@@ -539,6 +541,7 @@ define <vscale x 4 x i64> @vnmsub_vx_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vnmsub.vv v8, v16, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsub_vx_nxv4i64:
@@ -577,6 +580,7 @@ define <vscale x 8 x i64> @vnmsub_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; RV32-NEXT:    vlse64.v v24, (a0), zero
 ; RV32-NEXT:    vnmsac.vv v8, v16, v24
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vnmsub_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
index dcfe07c1fba658..772fa9cabe0e0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
@@ -788,6 +788,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv1i64:
@@ -844,6 +845,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv2i64:
@@ -900,6 +902,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv4i64:
@@ -956,6 +959,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
index b9388e58797048..7e54e9cc084889 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
@@ -1331,6 +1331,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscal
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv1i64:
@@ -1357,6 +1358,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv1i64_unmasked:
@@ -1425,6 +1427,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscal
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv2i64:
@@ -1451,6 +1454,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv2i64_unmasked:
@@ -1519,6 +1523,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscal
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv4i64:
@@ -1545,6 +1550,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv4i64_unmasked:
@@ -1613,6 +1619,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscal
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv8i64:
@@ -1639,6 +1646,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vor_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
index 717b3a551d219f..e840036c6a3da8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
@@ -535,9 +535,13 @@ define <vscale x 128 x i8> @test_vp_reverse_nxv128i8(<vscale x 128 x i8> %src, i
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a3)
 ; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    .cfi_def_cfa sp, 80
 ; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 
   %dst = call <vscale x 128 x i8> @llvm.experimental.vp.reverse.nxv128i8(<vscale x 128 x i8> %src, <vscale x 128 x i1> splat (i1 1), i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll
index 0da05c1bd4364d..bfd45ffa142ae9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll
@@ -197,6 +197,7 @@ define <vscale x 1 x i64> @vp_splat_nxv1i64(i64 %val, <vscale x 1 x i1> %m, i32
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_nxv1i64:
@@ -219,6 +220,7 @@ define <vscale x 2 x i64> @vp_splat_nxv2i64(i64 %val, <vscale x 2 x i1> %m, i32
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_nxv2i64:
@@ -241,6 +243,7 @@ define <vscale x 4 x i64> @vp_splat_nxv4i64(i64 %val, <vscale x 4 x i1> %m, i32
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_nxv4i64:
@@ -263,6 +266,7 @@ define <vscale x 8 x i64> @vp_splat_nxv8i64(i64 %val, <vscale x 8 x i1> %m, i32
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_splat_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index 86dfc74fdee37b..aecc38aef3ed8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -390,7 +390,9 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 128 x i8> @llvm.vp.merge.nxv128i8(<vscale x 128 x i1> %m, <vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, i32 %evl)
   ret <vscale x 128 x i8> %v
@@ -859,6 +861,7 @@ define <vscale x 1 x i64> @vpmerge_vx_nxv1i64(i64 %a, <vscale x 1 x i64> %vb, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_nxv1i64:
@@ -906,6 +909,7 @@ define <vscale x 2 x i64> @vpmerge_vx_nxv2i64(i64 %a, <vscale x 2 x i64> %vb, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_nxv2i64:
@@ -953,6 +957,7 @@ define <vscale x 4 x i64> @vpmerge_vx_nxv4i64(i64 %a, <vscale x 4 x i64> %vb, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_nxv4i64:
@@ -1000,6 +1005,7 @@ define <vscale x 8 x i64> @vpmerge_vx_nxv8i64(i64 %a, <vscale x 8 x i64> %vb, <v
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, tu, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpmerge_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index 0028f3035c2734..21366b513f3f34 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -2351,7 +2351,9 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   call void @llvm.vp.scatter.nxv16f64.nxv16p0(<vscale x 16 x double> %val, <vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 %evl)
   ret void
@@ -2431,7 +2433,9 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i16> %idxs
   call void @llvm.vp.scatter.nxv16f64.nxv16p0(<vscale x 16 x double> %val, <vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 %evl)
@@ -2512,7 +2516,9 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV64-NEXT:    slli a1, a0, 3
 ; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa sp, 16
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %eidxs = sext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
   %ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i64> %eidxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index d935e52149d207..10315a53ce1b92 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -506,7 +506,9 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   call void @llvm.vp.store.nxv17f64.p0(<vscale x 17 x double> %val, ptr %ptr, <vscale x 17 x i1> %m, i32 %evl)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index c41ddaee75a86c..240fc45ee81e45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1234,6 +1234,7 @@ define signext i64 @vpreduce_add_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_add_nxv1i64:
@@ -1266,6 +1267,7 @@ define signext i64 @vpwreduce_add_nxv1i32(i64 signext %s, <vscale x 1 x i32> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpwreduce_add_nxv1i32:
@@ -1300,6 +1302,7 @@ define signext i64 @vpwreduce_uadd_nxv1i32(i64 signext %s, <vscale x 1 x i32> %v
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpwreduce_uadd_nxv1i32:
@@ -1336,6 +1339,7 @@ define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umax_nxv1i64:
@@ -1370,6 +1374,7 @@ define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smax_nxv1i64:
@@ -1404,6 +1409,7 @@ define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umin_nxv1i64:
@@ -1438,6 +1444,7 @@ define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smin_nxv1i64:
@@ -1472,6 +1479,7 @@ define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_and_nxv1i64:
@@ -1506,6 +1514,7 @@ define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v, <
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_or_nxv1i64:
@@ -1540,6 +1549,7 @@ define signext i64 @vpreduce_xor_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_xor_nxv1i64:
@@ -1574,6 +1584,7 @@ define signext i64 @vpreduce_add_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_add_nxv2i64:
@@ -1606,6 +1617,7 @@ define signext i64 @vwpreduce_add_nxv2i32(i64 signext %s, <vscale x 2 x i32> %v,
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwpreduce_add_nxv2i32:
@@ -1640,6 +1652,7 @@ define signext i64 @vwpreduce_uadd_nxv2i32(i64 signext %s, <vscale x 2 x i32> %v
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwpreduce_uadd_nxv2i32:
@@ -1676,6 +1689,7 @@ define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umax_nxv2i64:
@@ -1710,6 +1724,7 @@ define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smax_nxv2i64:
@@ -1744,6 +1759,7 @@ define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umin_nxv2i64:
@@ -1778,6 +1794,7 @@ define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smin_nxv2i64:
@@ -1812,6 +1829,7 @@ define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_and_nxv2i64:
@@ -1846,6 +1864,7 @@ define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v, <
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_or_nxv2i64:
@@ -1880,6 +1899,7 @@ define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_xor_nxv2i64:
@@ -1914,6 +1934,7 @@ define signext i64 @vpreduce_add_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_add_nxv4i64:
@@ -1946,6 +1967,7 @@ define signext i64 @vpwreduce_add_nxv4i32(i64 signext %s, <vscale x 4 x i32> %v,
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpwreduce_add_nxv4i32:
@@ -1980,6 +2002,7 @@ define signext i64 @vpwreduce_uadd_nxv4i32(i64 signext %s, <vscale x 4 x i32> %v
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpwreduce_uadd_nxv4i32:
@@ -2016,6 +2039,7 @@ define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umax_nxv4i64:
@@ -2050,6 +2074,7 @@ define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smax_nxv4i64:
@@ -2084,6 +2109,7 @@ define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_umin_nxv4i64:
@@ -2118,6 +2144,7 @@ define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_smin_nxv4i64:
@@ -2152,6 +2179,7 @@ define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_and_nxv4i64:
@@ -2186,6 +2214,7 @@ define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v, <
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_or_nxv4i64:
@@ -2220,6 +2249,7 @@ define signext i64 @vpreduce_xor_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpreduce_xor_nxv4i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
index 3a6ae5fdb21073..7464f8ca5143b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -946,6 +946,7 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vrem.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv1i64:
@@ -981,6 +982,7 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vrem_vi_nxv1i64_0:
@@ -1029,6 +1031,7 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vrem.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv2i64:
@@ -1064,6 +1067,7 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vrem_vi_nxv2i64_0:
@@ -1112,6 +1116,7 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vrem.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv4i64:
@@ -1147,6 +1152,7 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vrem_vi_nxv4i64_0:
@@ -1195,6 +1201,7 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vrem.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv8i64:
@@ -1230,6 +1237,7 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vrem_vi_nxv8i64_0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
index 2ef96f4b3896fc..32366f323c7635 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -900,6 +900,7 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv1i64:
@@ -926,6 +927,7 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv1i64_unmasked:
@@ -974,6 +976,7 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv2i64:
@@ -1000,6 +1003,7 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv2i64_unmasked:
@@ -1048,6 +1052,7 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv4i64:
@@ -1074,6 +1079,7 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv4i64_unmasked:
@@ -1122,6 +1128,7 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv8i64:
@@ -1148,6 +1155,7 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vrem.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrem_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
index ed40f5af4fa4ca..64fa565812bd9c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
@@ -685,6 +685,7 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vremu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv1i64:
@@ -716,6 +717,7 @@ define <vscale x 1 x i64> @vremu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vremu_vi_nxv1i64_0:
@@ -790,6 +792,7 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vremu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv2i64:
@@ -821,6 +824,7 @@ define <vscale x 2 x i64> @vremu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vremu_vi_nxv2i64_0:
@@ -895,6 +899,7 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vremu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv4i64:
@@ -926,6 +931,7 @@ define <vscale x 4 x i64> @vremu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vremu_vi_nxv4i64_0:
@@ -1000,6 +1006,7 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vremu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv8i64:
@@ -1031,6 +1038,7 @@ define <vscale x 8 x i64> @vremu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-V-NEXT:    li a0, -7
 ; RV32-V-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-V-NEXT:    ret
 ;
 ; ZVE64X-LABEL: vremu_vi_nxv8i64_0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
index 1f1ed4a1269acb..29368d58259d04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -899,6 +899,7 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv1i64:
@@ -925,6 +926,7 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv1i64_unmasked:
@@ -973,6 +975,7 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv2i64:
@@ -999,6 +1002,7 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv2i64_unmasked:
@@ -1047,6 +1051,7 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv4i64:
@@ -1073,6 +1078,7 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv4i64_unmasked:
@@ -1121,6 +1127,7 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv8i64:
@@ -1147,6 +1154,7 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vremu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll
index e97b1f41ad3d36..120fa6bbbf259e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll
@@ -410,6 +410,7 @@ define <vscale x 1 x i64> @vrsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv1i64:
@@ -445,6 +446,7 @@ define <vscale x 2 x i64> @vrsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv2i64:
@@ -480,6 +482,7 @@ define <vscale x 4 x i64> @vrsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv4i64:
@@ -515,6 +518,7 @@ define <vscale x 8 x i64> @vrsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
index be372c9aa54d5d..91e5d29005f3a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
@@ -847,6 +847,7 @@ define <vscale x 1 x i64> @vrsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v9, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv1i64:
@@ -873,6 +874,7 @@ define <vscale x 1 x i64> @vrsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v9, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv1i64_unmasked:
@@ -921,6 +923,7 @@ define <vscale x 2 x i64> @vrsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v10, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv2i64:
@@ -947,6 +950,7 @@ define <vscale x 2 x i64> @vrsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v10, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv2i64_unmasked:
@@ -995,6 +999,7 @@ define <vscale x 4 x i64> @vrsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v12, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv4i64:
@@ -1021,6 +1026,7 @@ define <vscale x 4 x i64> @vrsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v12, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv4i64_unmasked:
@@ -1069,6 +1075,7 @@ define <vscale x 8 x i64> @vrsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v16, v8, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv8i64:
@@ -1095,6 +1102,7 @@ define <vscale x 8 x i64> @vrsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v16, v8
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrsub_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll
index 6a8b801254057b..411ad231d64581 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll
@@ -640,6 +640,7 @@ define <vscale x 1 x i64> @sadd_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_nxv1i64_vx:
@@ -687,6 +688,7 @@ define <vscale x 2 x i64> @sadd_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_nxv2i64_vx:
@@ -734,6 +736,7 @@ define <vscale x 4 x i64> @sadd_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_nxv4i64_vx:
@@ -781,6 +784,7 @@ define <vscale x 8 x i64> @sadd_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sadd_nxv8i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index 71b91f56e89a5e..bbf31ed8f6f9be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -1431,6 +1431,7 @@ define <vscale x 1 x i64> @vsadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv1i64:
@@ -1457,6 +1458,7 @@ define <vscale x 1 x i64> @vsadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv1i64_unmasked:
@@ -1525,6 +1527,7 @@ define <vscale x 2 x i64> @vsadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv2i64:
@@ -1551,6 +1554,7 @@ define <vscale x 2 x i64> @vsadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv2i64_unmasked:
@@ -1619,6 +1623,7 @@ define <vscale x 4 x i64> @vsadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv4i64:
@@ -1645,6 +1650,7 @@ define <vscale x 4 x i64> @vsadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv4i64_unmasked:
@@ -1713,6 +1719,7 @@ define <vscale x 8 x i64> @vsadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv8i64:
@@ -1739,6 +1746,7 @@ define <vscale x 8 x i64> @vsadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsadd_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll
index 4fe765c34ba6ce..f38f0d011a2e20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll
@@ -640,6 +640,7 @@ define <vscale x 1 x i64> @uadd_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_nxv1i64_vx:
@@ -687,6 +688,7 @@ define <vscale x 2 x i64> @uadd_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_nxv2i64_vx:
@@ -734,6 +736,7 @@ define <vscale x 4 x i64> @uadd_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_nxv4i64_vx:
@@ -781,6 +784,7 @@ define <vscale x 8 x i64> @uadd_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsaddu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uadd_nxv8i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index 454a4ebab04a28..a0f00c84a944f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -1430,6 +1430,7 @@ define <vscale x 1 x i64> @vsaddu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv1i64:
@@ -1456,6 +1457,7 @@ define <vscale x 1 x i64> @vsaddu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv1i64_unmasked:
@@ -1524,6 +1526,7 @@ define <vscale x 2 x i64> @vsaddu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv2i64:
@@ -1550,6 +1553,7 @@ define <vscale x 2 x i64> @vsaddu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv2i64_unmasked:
@@ -1618,6 +1622,7 @@ define <vscale x 4 x i64> @vsaddu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv4i64:
@@ -1644,6 +1649,7 @@ define <vscale x 4 x i64> @vsaddu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv4i64_unmasked:
@@ -1712,6 +1718,7 @@ define <vscale x 8 x i64> @vsaddu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv8i64:
@@ -1738,6 +1745,7 @@ define <vscale x 8 x i64> @vsaddu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsaddu_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
index 2715ec78bd7949..b799c6633d25c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
@@ -633,6 +633,7 @@ define <vscale x 1 x i64> @vmerge_xv_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmerge_xv_nxv1i64:
@@ -677,6 +678,7 @@ define <vscale x 2 x i64> @vmerge_xv_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmerge_xv_nxv2i64:
@@ -721,6 +723,7 @@ define <vscale x 4 x i64> @vmerge_xv_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmerge_xv_nxv4i64:
@@ -765,6 +768,7 @@ define <vscale x 8 x i64> @vmerge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmerge_xv_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index ee0617c9314801..c61e024df0d36d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -419,7 +419,9 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x i32> @llvm.vp.select.nxv32i32(<vscale x 32 x i1> %a, <vscale x 32 x i32> %b, <vscale x 32 x i32> %c, i32 %evl)
   ret <vscale x 32 x i32> %v
@@ -498,7 +500,9 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 5
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %evl = call i32 @llvm.vscale.i32()
   %evl0 = mul i32 %evl, 8
@@ -784,7 +788,9 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.select.nxv16f64(<vscale x 16 x i1> %a, <vscale x 16 x double> %b, <vscale x 16 x double> %c, i32 %evl)
   ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index d163988b3d41cd..c027446f03ae61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -541,7 +541,9 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; ZVFH-NEXT:    csrr a0, vlenb
 ; ZVFH-NEXT:    slli a0, a0, 3
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
index 721d6ef26d61e1..cc8b151571bb22 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
@@ -45,6 +45,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
 ; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV32V-NEXT:    vlse64.v v8, (a0), zero
 ; RV32V-NEXT:    addi sp, sp, 16
+; RV32V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_4:
@@ -69,6 +70,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_5(i64 %a) {
 ; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV32V-NEXT:    vlse64.v v8, (a0), zero
 ; RV32V-NEXT:    addi sp, sp, 16
+; RV32V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_5:
@@ -146,6 +148,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
 ; RV32V-NEXT:    vlse64.v v16, (a0), zero
 ; RV32V-NEXT:    vadd.vv v8, v8, v16
 ; RV32V-NEXT:    addi sp, sp, 16
+; RV32V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_10:
@@ -175,6 +178,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
 ; RV32V-NEXT:    vlse64.v v16, (a0), zero
 ; RV32V-NEXT:    vadd.vv v8, v8, v16
 ; RV32V-NEXT:    addi sp, sp, 16
+; RV32V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_11:
@@ -201,6 +205,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_12(<vscale x 8 x i64> %v, i64 %a) {
 ; RV32V-NEXT:    vlse64.v v16, (a0), zero
 ; RV32V-NEXT:    vadd.vv v8, v8, v16
 ; RV32V-NEXT:    addi sp, sp, 16
+; RV32V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_12:
@@ -244,6 +249,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_14(i32 %a) {
 ; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV32V-NEXT:    vlse64.v v8, (a0), zero
 ; RV32V-NEXT:    addi sp, sp, 16
+; RV32V-NEXT:    .cfi_def_cfa_offset 0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_14:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
index c043858c029473..8d70a8590399ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
@@ -658,6 +658,7 @@ define <vscale x 1 x i64> @ssub_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_nxv1i64_vx:
@@ -706,6 +707,7 @@ define <vscale x 2 x i64> @ssub_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_nxv2i64_vx:
@@ -754,6 +756,7 @@ define <vscale x 4 x i64> @ssub_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_nxv4i64_vx:
@@ -802,6 +805,7 @@ define <vscale x 8 x i64> @ssub_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vssub.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub_nxv8i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index 613b58b0f1b88a..478c0d586308e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -1475,6 +1475,7 @@ define <vscale x 1 x i64> @vssub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv1i64:
@@ -1501,6 +1502,7 @@ define <vscale x 1 x i64> @vssub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv1i64_unmasked:
@@ -1571,6 +1573,7 @@ define <vscale x 2 x i64> @vssub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv2i64:
@@ -1597,6 +1600,7 @@ define <vscale x 2 x i64> @vssub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv2i64_unmasked:
@@ -1667,6 +1671,7 @@ define <vscale x 4 x i64> @vssub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv4i64:
@@ -1693,6 +1698,7 @@ define <vscale x 4 x i64> @vssub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv4i64_unmasked:
@@ -1763,6 +1769,7 @@ define <vscale x 8 x i64> @vssub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv8i64:
@@ -1789,6 +1796,7 @@ define <vscale x 8 x i64> @vssub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssub_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
index 5349548a213bc7..5fbea260d2832b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
@@ -658,6 +658,7 @@ define <vscale x 1 x i64> @usub_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_nxv1i64_vx:
@@ -706,6 +707,7 @@ define <vscale x 2 x i64> @usub_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_nxv2i64_vx:
@@ -754,6 +756,7 @@ define <vscale x 4 x i64> @usub_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_nxv4i64_vx:
@@ -802,6 +805,7 @@ define <vscale x 8 x i64> @usub_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vssubu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usub_nxv8i64_vx:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 8c729d7d9bfb6e..6428dfde6b23de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -1473,6 +1473,7 @@ define <vscale x 1 x i64> @vssubu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv1i64:
@@ -1499,6 +1500,7 @@ define <vscale x 1 x i64> @vssubu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv1i64_unmasked:
@@ -1569,6 +1571,7 @@ define <vscale x 2 x i64> @vssubu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv2i64:
@@ -1595,6 +1598,7 @@ define <vscale x 2 x i64> @vssubu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv2i64_unmasked:
@@ -1665,6 +1669,7 @@ define <vscale x 4 x i64> @vssubu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv4i64:
@@ -1691,6 +1696,7 @@ define <vscale x 4 x i64> @vssubu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv4i64_unmasked:
@@ -1761,6 +1767,7 @@ define <vscale x 8 x i64> @vssubu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv8i64:
@@ -1787,6 +1794,7 @@ define <vscale x 8 x i64> @vssubu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i6
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vssubu_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
index c2173c9a291fcf..8b07740f0aeb2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
@@ -629,6 +629,7 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv1i64:
@@ -675,6 +676,7 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv2i64:
@@ -721,6 +723,7 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv4i64:
@@ -767,6 +770,7 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsub.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
index a2b9285fedeaf7..bc6fcee4ef2674 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
@@ -927,6 +927,7 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv1i64:
@@ -953,6 +954,7 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv1i64_unmasked:
@@ -1001,6 +1003,7 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv2i64:
@@ -1027,6 +1030,7 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv2i64_unmasked:
@@ -1075,6 +1079,7 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv4i64:
@@ -1101,6 +1106,7 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv4i64_unmasked:
@@ -1149,6 +1155,7 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv8i64:
@@ -1175,6 +1182,7 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vsub.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vsub_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index 27755c166cc52d..b9697770994ef3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -356,7 +356,9 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 16
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
   ret <vscale x 32 x i32> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index 7c96a9e9e10f65..a9994c55d14fb2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -533,7 +533,9 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; ZVFH-NEXT:    csrr a0, vlenb
 ; ZVFH-NEXT:    slli a0, a0, 3
 ; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
index b7ce0e3f196f05..b22d7098f5f2b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1478,6 +1478,7 @@ define <vscale x 8 x i64> @vwadd_vx_splat_zext(<vscale x 8 x i32> %va, i32 %b) {
 ; RV32-NEXT:    vwaddu.wv v16, v16, v8
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwadd_vx_splat_zext:
@@ -1537,6 +1538,7 @@ define <vscale x 8 x i64> @vwadd_wx_splat_zext(<vscale x 8 x i64> %va, i32 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vadd.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwadd_wx_splat_zext:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
index b03a105610dfdf..733eaac19b5688 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
@@ -978,6 +978,7 @@ define <vscale x 1 x i64> @vxor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vxor.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv1i64:
@@ -1044,6 +1045,7 @@ define <vscale x 2 x i64> @vxor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vxor.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv2i64:
@@ -1110,6 +1112,7 @@ define <vscale x 4 x i64> @vxor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vxor.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv4i64:
@@ -1176,6 +1179,7 @@ define <vscale x 8 x i64> @vxor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vxor.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
index f2235b4fdc94ba..52a0ce5ba25667 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
@@ -1699,6 +1699,7 @@ define <vscale x 1 x i64> @vxor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v9, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv1i64:
@@ -1725,6 +1726,7 @@ define <vscale x 1 x i64> @vxor_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v9
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv1i64_unmasked:
@@ -1813,6 +1815,7 @@ define <vscale x 2 x i64> @vxor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v10, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv2i64:
@@ -1839,6 +1842,7 @@ define <vscale x 2 x i64> @vxor_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v10
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv2i64_unmasked:
@@ -1927,6 +1931,7 @@ define <vscale x 4 x i64> @vxor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v12, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv4i64:
@@ -1953,6 +1958,7 @@ define <vscale x 4 x i64> @vxor_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v12
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv4i64_unmasked:
@@ -2041,6 +2047,7 @@ define <vscale x 8 x i64> @vxor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v16, v0.t
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv8i64:
@@ -2067,6 +2074,7 @@ define <vscale x 8 x i64> @vxor_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT:    vxor.vv v8, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vxor_vx_nxv8i64_unmasked:
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index fcd852f1210df5..2b1ed7df010e03 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -63,7 +63,9 @@ body: |
     ; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
     ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3
     ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 16
     ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
+    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NEXT: PseudoRET
     %0:gpr = COPY $x10
     %1:gprnox0 = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index a320b44d2c6a81..0c62fb4050051f 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -67,7 +67,9 @@ define i32 @f3() shadowcallstack {
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    call bar
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    lw ra, -4(gp)
 ; RV32-NEXT:    addi gp, gp, -4
 ; RV32-NEXT:    .cfi_restore gp
@@ -84,7 +86,9 @@ define i32 @f3() shadowcallstack {
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    call bar
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ld ra, -8(gp)
 ; RV64-NEXT:    addi gp, gp, -8
 ; RV64-NEXT:    .cfi_restore gp
@@ -99,7 +103,9 @@ define i32 @f3() shadowcallstack {
 ; RV32-ZICFISS-NEXT:    .cfi_offset ra, -4
 ; RV32-ZICFISS-NEXT:    call bar
 ; RV32-ZICFISS-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT:    .cfi_restore ra
 ; RV32-ZICFISS-NEXT:    addi sp, sp, 16
+; RV32-ZICFISS-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-ZICFISS-NEXT:    sspopchk ra
 ; RV32-ZICFISS-NEXT:    ret
 ;
@@ -112,7 +118,9 @@ define i32 @f3() shadowcallstack {
 ; RV64-ZICFISS-NEXT:    .cfi_offset ra, -8
 ; RV64-ZICFISS-NEXT:    call bar
 ; RV64-ZICFISS-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT:    .cfi_restore ra
 ; RV64-ZICFISS-NEXT:    addi sp, sp, 16
+; RV64-ZICFISS-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-ZICFISS-NEXT:    sspopchk ra
 ; RV64-ZICFISS-NEXT:    ret
   %res = call i32 @bar()
@@ -150,7 +158,12 @@ define i32 @f4() shadowcallstack {
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
+; RV32-NEXT:    .cfi_restore s2
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    lw ra, -4(gp)
 ; RV32-NEXT:    addi gp, gp, -4
 ; RV32-NEXT:    .cfi_restore gp
@@ -185,7 +198,12 @@ define i32 @f4() shadowcallstack {
 ; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s1
+; RV64-NEXT:    .cfi_restore s2
 ; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ld ra, -8(gp)
 ; RV64-NEXT:    addi gp, gp, -8
 ; RV64-NEXT:    .cfi_restore gp
@@ -218,7 +236,12 @@ define i32 @f4() shadowcallstack {
 ; RV32-ZICFISS-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32-ZICFISS-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32-ZICFISS-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT:    .cfi_restore ra
+; RV32-ZICFISS-NEXT:    .cfi_restore s0
+; RV32-ZICFISS-NEXT:    .cfi_restore s1
+; RV32-ZICFISS-NEXT:    .cfi_restore s2
 ; RV32-ZICFISS-NEXT:    addi sp, sp, 16
+; RV32-ZICFISS-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-ZICFISS-NEXT:    sspopchk ra
 ; RV32-ZICFISS-NEXT:    ret
 ;
@@ -249,7 +272,12 @@ define i32 @f4() shadowcallstack {
 ; RV64-ZICFISS-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64-ZICFISS-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64-ZICFISS-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT:    .cfi_restore ra
+; RV64-ZICFISS-NEXT:    .cfi_restore s0
+; RV64-ZICFISS-NEXT:    .cfi_restore s1
+; RV64-ZICFISS-NEXT:    .cfi_restore s2
 ; RV64-ZICFISS-NEXT:    addi sp, sp, 32
+; RV64-ZICFISS-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-ZICFISS-NEXT:    sspopchk ra
 ; RV64-ZICFISS-NEXT:    ret
   %res1 = call i32 @bar()
diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll
index 1bffa42b78675a..64be997c191be2 100644
--- a/llvm/test/CodeGen/RISCV/shl-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll
@@ -432,7 +432,11 @@ define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: shl_cttz_multiuse_i32:
@@ -453,7 +457,11 @@ define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
 ; RV32ZBB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ZBB-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32ZBB-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    .cfi_restore ra
+; RV32ZBB-NEXT:    .cfi_restore s0
+; RV32ZBB-NEXT:    .cfi_restore s1
 ; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64I-LABEL: shl_cttz_multiuse_i32:
@@ -483,7 +491,11 @@ define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: shl_cttz_multiuse_i32:
@@ -504,7 +516,11 @@ define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) {
 ; RV64ZBB-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64ZBB-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64ZBB-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZBB-NEXT:    .cfi_restore ra
+; RV64ZBB-NEXT:    .cfi_restore s0
+; RV64ZBB-NEXT:    .cfi_restore s1
 ; RV64ZBB-NEXT:    addi sp, sp, 32
+; RV64ZBB-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZBB-NEXT:    ret
 entry:
   %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true)
diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll b/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
index 5e557de37423ee..20a0d6d07b09c7 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
@@ -40,7 +40,9 @@ define dso_local signext i32 @test_shrinkwrap_jump_table(ptr noundef %m) local_u
 ; CHECK-NEXT:    call default_func
 ; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr %m, align 4
diff --git a/llvm/test/CodeGen/RISCV/stack-inst-compress.mir b/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
index 5cc4615bb64a13..2e6d888e65ba64 100644
--- a/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
+++ b/llvm/test/CodeGen/RISCV/stack-inst-compress.mir
@@ -54,8 +54,11 @@ body:             |
     ; CHECK-RV32-NO-COM-NEXT: renamable $x10 = ADDI $x2, 12
     ; CHECK-RV32-NO-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV32-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 32
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 2032
     ; CHECK-RV32-NO-COM-NEXT: $x1 = LW $x2, 2028 :: (load (s32) from %stack.1)
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV32-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV32-NO-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV32-COM-LABEL: name: _Z15stack_size_2048v
@@ -70,8 +73,11 @@ body:             |
     ; CHECK-RV32-COM-NEXT: renamable $x10 = ADDI $x2, 12
     ; CHECK-RV32-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV32-COM-NEXT: $x2 = frame-destroy ADDI $x2, 1808
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 256
     ; CHECK-RV32-COM-NEXT: $x1 = LW $x2, 252 :: (load (s32) from %stack.1)
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV32-COM-NEXT: $x2 = frame-destroy ADDI $x2, 256
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV32-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV64-NO-COM-LABEL: name: _Z15stack_size_2048v
@@ -86,8 +92,11 @@ body:             |
     ; CHECK-RV64-NO-COM-NEXT: renamable $x10 = ADDI $x2, 8
     ; CHECK-RV64-NO-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV64-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 32
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 2032
     ; CHECK-RV64-NO-COM-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.1)
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV64-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV64-NO-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV64-COM-LABEL: name: _Z15stack_size_2048v
@@ -102,8 +111,11 @@ body:             |
     ; CHECK-RV64-COM-NEXT: renamable $x10 = ADDI $x2, 8
     ; CHECK-RV64-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV64-COM-NEXT: $x2 = frame-destroy ADDI $x2, 1568
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 496
     ; CHECK-RV64-COM-NEXT: $x1 = LD $x2, 488 :: (load (s64) from %stack.1)
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV64-COM-NEXT: $x2 = frame-destroy ADDI $x2, 496
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV64-COM-NEXT: PseudoRET
     ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
     renamable $x10 = ADDI %stack.0, 0
@@ -142,8 +154,11 @@ body:             |
     ; CHECK-RV32-NO-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV32-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
     ; CHECK-RV32-NO-COM-NEXT: $x2 = frame-destroy ADDI killed $x2, 48
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 2032
     ; CHECK-RV32-NO-COM-NEXT: $x1 = LW $x2, 2028 :: (load (s32) from %stack.1)
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV32-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV32-NO-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV32-COM-LABEL: name: _Z15stack_size_4096v
@@ -160,8 +175,11 @@ body:             |
     ; CHECK-RV32-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV32-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
     ; CHECK-RV32-COM-NEXT: $x2 = frame-destroy ADDI killed $x2, 1824
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 256
     ; CHECK-RV32-COM-NEXT: $x1 = LW $x2, 252 :: (load (s32) from %stack.1)
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV32-COM-NEXT: $x2 = frame-destroy ADDI $x2, 256
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV32-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV64-NO-COM-LABEL: name: _Z15stack_size_4096v
@@ -178,8 +196,11 @@ body:             |
     ; CHECK-RV64-NO-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV64-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
     ; CHECK-RV64-NO-COM-NEXT: $x2 = frame-destroy ADDI killed $x2, 48
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 2032
     ; CHECK-RV64-NO-COM-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.1)
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV64-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV64-NO-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV64-COM-LABEL: name: _Z15stack_size_4096v
@@ -196,8 +217,11 @@ body:             |
     ; CHECK-RV64-COM-NEXT: PseudoCALL target-flags(riscv-call) @_Z6calleePi, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
     ; CHECK-RV64-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
     ; CHECK-RV64-COM-NEXT: $x2 = frame-destroy ADDI killed $x2, 1584
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 496
     ; CHECK-RV64-COM-NEXT: $x1 = LD $x2, 488 :: (load (s64) from %stack.1)
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV64-COM-NEXT: $x2 = frame-destroy ADDI $x2, 496
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV64-COM-NEXT: PseudoRET
     ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
     renamable $x10 = ADDI %stack.0, 0
@@ -238,8 +262,11 @@ body:             |
     ; CHECK-RV32-NO-COM-NEXT: $x10 = frame-destroy LUI 2
     ; CHECK-RV32-NO-COM-NEXT: $x10 = frame-destroy ADDI killed $x10, -2016
     ; CHECK-RV32-NO-COM-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 2032
     ; CHECK-RV32-NO-COM-NEXT: $x1 = LW $x2, 2028 :: (load (s32) from %stack.1)
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV32-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-RV32-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV32-NO-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV32-COM-LABEL: name: _Z15stack_size_8192v
@@ -258,8 +285,11 @@ body:             |
     ; CHECK-RV32-COM-NEXT: $x10 = frame-destroy LUI 2
     ; CHECK-RV32-COM-NEXT: $x10 = frame-destroy ADDI killed $x10, -240
     ; CHECK-RV32-COM-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 256
     ; CHECK-RV32-COM-NEXT: $x1 = LW $x2, 252 :: (load (s32) from %stack.1)
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV32-COM-NEXT: $x2 = frame-destroy ADDI $x2, 256
+    ; CHECK-RV32-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV32-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV64-NO-COM-LABEL: name: _Z15stack_size_8192v
@@ -278,8 +308,11 @@ body:             |
     ; CHECK-RV64-NO-COM-NEXT: $x10 = frame-destroy LUI 2
     ; CHECK-RV64-NO-COM-NEXT: $x10 = frame-destroy ADDIW killed $x10, -2016
     ; CHECK-RV64-NO-COM-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 2032
     ; CHECK-RV64-NO-COM-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.1)
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV64-NO-COM-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-RV64-NO-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV64-NO-COM-NEXT: PseudoRET
     ;
     ; CHECK-RV64-COM-LABEL: name: _Z15stack_size_8192v
@@ -298,8 +331,11 @@ body:             |
     ; CHECK-RV64-COM-NEXT: $x10 = frame-destroy LUI 2
     ; CHECK-RV64-COM-NEXT: $x10 = frame-destroy ADDIW killed $x10, -480
     ; CHECK-RV64-COM-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 496
     ; CHECK-RV64-COM-NEXT: $x1 = LD $x2, 488 :: (load (s64) from %stack.1)
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
     ; CHECK-RV64-COM-NEXT: $x2 = frame-destroy ADDI $x2, 496
+    ; CHECK-RV64-COM-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-RV64-COM-NEXT: PseudoRET
     ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
     renamable $x10 = ADDI %stack.0, 0
diff --git a/llvm/test/CodeGen/RISCV/stack-offset.ll b/llvm/test/CodeGen/RISCV/stack-offset.ll
index cc81fd62eba9da..402d3546eae297 100644
--- a/llvm/test/CodeGen/RISCV/stack-offset.ll
+++ b/llvm/test/CodeGen/RISCV/stack-offset.ll
@@ -32,8 +32,11 @@ define void @test() {
 ; RV32I-NEXT:    call inspect
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    addi sp, sp, 1136
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: test:
@@ -55,8 +58,11 @@ define void @test() {
 ; RV32ZBA-NEXT:    call inspect
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
 ; RV32ZBA-NEXT:    addi sp, sp, 1136
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore ra
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64I-LABEL: test:
@@ -80,8 +86,11 @@ define void @test() {
 ; RV64I-NEXT:    call inspect
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    addi sp, sp, 1136
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: test:
@@ -103,8 +112,11 @@ define void @test() {
 ; RV64ZBA-NEXT:    call inspect
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
 ; RV64ZBA-NEXT:    addi sp, sp, 1136
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    .cfi_restore ra
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT:    ret
   %p4 = alloca [64 x i8], align 1
   %p3 = alloca [1024 x i8], align 1
@@ -131,8 +143,11 @@ define void @align_8() {
 ; RV32I-NEXT:    call inspect
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: align_8:
@@ -150,8 +165,11 @@ define void @align_8() {
 ; RV32ZBA-NEXT:    call inspect
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
 ; RV32ZBA-NEXT:    addi sp, sp, 48
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore ra
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64I-LABEL: align_8:
@@ -170,8 +188,11 @@ define void @align_8() {
 ; RV64I-NEXT:    call inspect
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: align_8:
@@ -189,8 +210,11 @@ define void @align_8() {
 ; RV64ZBA-NEXT:    call inspect
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
 ; RV64ZBA-NEXT:    addi sp, sp, 64
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    .cfi_restore ra
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT:    ret
   %p2 = alloca i8, align 8
   %p1 = alloca [4097 x i8], align 1
@@ -215,8 +239,11 @@ define void @align_4() {
 ; RV32I-NEXT:    call inspect
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: align_4:
@@ -234,8 +261,11 @@ define void @align_4() {
 ; RV32ZBA-NEXT:    call inspect
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
 ; RV32ZBA-NEXT:    addi sp, sp, 48
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore ra
 ; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64I-LABEL: align_4:
@@ -254,8 +284,11 @@ define void @align_4() {
 ; RV64I-NEXT:    call inspect
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: align_4:
@@ -273,8 +306,11 @@ define void @align_4() {
 ; RV64ZBA-NEXT:    call inspect
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
 ; RV64ZBA-NEXT:    addi sp, sp, 64
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    .cfi_restore ra
 ; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT:    ret
   %p2 = alloca i8, align 4
   %p1 = alloca [4097 x i8], align 1
@@ -299,8 +335,11 @@ define void @align_2() {
 ; RV32-NEXT:    call inspect
 ; RV32-NEXT:    addi sp, sp, 2032
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: align_2:
@@ -319,8 +358,11 @@ define void @align_2() {
 ; RV64-NEXT:    call inspect
 ; RV64-NEXT:    addi sp, sp, 2032
 ; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %p2 = alloca i8, align 2
   %p1 = alloca [4097 x i8], align 1
@@ -346,8 +388,11 @@ define void @align_1() {
 ; RV32-NEXT:    call inspect
 ; RV32-NEXT:    addi sp, sp, 2032
 ; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
 ; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: align_1:
@@ -366,8 +411,11 @@ define void @align_1() {
 ; RV64-NEXT:    call inspect
 ; RV64-NEXT:    addi sp, sp, 2032
 ; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
 ; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
 ; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %p2 = alloca i8, align 1
   %p1 = alloca [4097 x i8], align 1
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
index 56723745d012dd..3990fc31b7d212 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
@@ -32,10 +32,15 @@ define void @caller(i32 %n) {
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -64
+; RV32I-NEXT:    .cfi_def_cfa sp, 64
 ; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
+; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 64
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller:
@@ -59,10 +64,15 @@ define void @caller(i32 %n) {
 ; RV32I-ILP32E-NEXT:    mv a1, s1
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -64
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 64
 ; RV32I-ILP32E-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
+; RV32I-ILP32E-NEXT:    .cfi_restore s1
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 64
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller:
@@ -88,10 +98,15 @@ define void @caller(i32 %n) {
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -64
+; RV64I-NEXT:    .cfi_def_cfa sp, 64
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
+; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller:
@@ -117,10 +132,15 @@ define void @caller(i32 %n) {
 ; RV64I-LP64E-NEXT:    mv a1, s1
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -64
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 64
 ; RV64I-LP64E-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
+; RV64I-LP64E-NEXT:    .cfi_restore s1
 ; RV64I-LP64E-NEXT:    addi sp, sp, 64
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, i32 %n
   %2 = alloca i32, align 64
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll
index 58876e4888a913..368840933f04ba 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll
@@ -20,7 +20,9 @@ define void @caller16() {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller16:
@@ -37,9 +39,13 @@ define void @caller16() {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -16
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 16
 ; RV32I-ILP32E-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 16
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller16:
@@ -51,7 +57,9 @@ define void @caller16() {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller16:
@@ -68,9 +76,13 @@ define void @caller16() {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -32
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 32
 ; RV64I-LP64E-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 32
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 16
   call void @callee(ptr %1)
@@ -87,7 +99,9 @@ define void @caller_no_realign16() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign16:
@@ -99,7 +113,9 @@ define void @caller_no_realign16() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign16:
@@ -111,7 +127,9 @@ define void @caller_no_realign16() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign16:
@@ -123,7 +141,9 @@ define void @caller_no_realign16() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 16
   call void @callee(ptr %1)
@@ -145,9 +165,13 @@ define void @caller32() {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -32
+; RV32I-NEXT:    .cfi_def_cfa sp, 32
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller32:
@@ -164,9 +188,13 @@ define void @caller32() {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -32
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 32
 ; RV32I-ILP32E-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 32
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller32:
@@ -183,9 +211,13 @@ define void @caller32() {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -32
+; RV64I-NEXT:    .cfi_def_cfa sp, 32
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller32:
@@ -202,9 +234,13 @@ define void @caller32() {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -32
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 32
 ; RV64I-LP64E-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 32
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 32
   call void @callee(ptr %1)
@@ -221,7 +257,9 @@ define void @caller_no_realign32() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign32:
@@ -233,7 +271,9 @@ define void @caller_no_realign32() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign32:
@@ -245,7 +285,9 @@ define void @caller_no_realign32() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign32:
@@ -257,7 +299,9 @@ define void @caller_no_realign32() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 32
   call void @callee(ptr %1)
@@ -279,9 +323,13 @@ define void @caller64() {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -64
+; RV32I-NEXT:    .cfi_def_cfa sp, 64
 ; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 64
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller64:
@@ -298,9 +346,13 @@ define void @caller64() {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -64
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 64
 ; RV32I-ILP32E-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 64
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller64:
@@ -317,9 +369,13 @@ define void @caller64() {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -64
+; RV64I-NEXT:    .cfi_def_cfa sp, 64
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller64:
@@ -336,9 +392,13 @@ define void @caller64() {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -64
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 64
 ; RV64I-LP64E-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 64
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 64
   call void @callee(ptr %1)
@@ -355,7 +415,9 @@ define void @caller_no_realign64() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign64:
@@ -367,7 +429,9 @@ define void @caller_no_realign64() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign64:
@@ -379,7 +443,9 @@ define void @caller_no_realign64() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign64:
@@ -391,7 +457,9 @@ define void @caller_no_realign64() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 64
   call void @callee(ptr %1)
@@ -413,9 +481,13 @@ define void @caller128() {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -128
+; RV32I-NEXT:    .cfi_def_cfa sp, 128
 ; RV32I-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 128
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller128:
@@ -432,9 +504,13 @@ define void @caller128() {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -128
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 128
 ; RV32I-ILP32E-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 128
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller128:
@@ -451,9 +527,13 @@ define void @caller128() {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -128
+; RV64I-NEXT:    .cfi_def_cfa sp, 128
 ; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 128
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller128:
@@ -470,9 +550,13 @@ define void @caller128() {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -128
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 128
 ; RV64I-LP64E-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 128
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 128
   call void @callee(ptr %1)
@@ -489,7 +573,9 @@ define void @caller_no_realign128() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign128:
@@ -501,7 +587,9 @@ define void @caller_no_realign128() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign128:
@@ -513,7 +601,9 @@ define void @caller_no_realign128() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign128:
@@ -525,7 +615,9 @@ define void @caller_no_realign128() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 128
   call void @callee(ptr %1)
@@ -547,9 +639,13 @@ define void @caller256() {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -256
+; RV32I-NEXT:    .cfi_def_cfa sp, 256
 ; RV32I-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 256
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller256:
@@ -566,9 +662,13 @@ define void @caller256() {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -256
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 256
 ; RV32I-ILP32E-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 256
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller256:
@@ -585,9 +685,13 @@ define void @caller256() {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -256
+; RV64I-NEXT:    .cfi_def_cfa sp, 256
 ; RV64I-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 256
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller256:
@@ -604,9 +708,13 @@ define void @caller256() {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -256
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 256
 ; RV64I-LP64E-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 256
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 256
   call void @callee(ptr %1)
@@ -623,7 +731,9 @@ define void @caller_no_realign256() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign256:
@@ -635,7 +745,9 @@ define void @caller_no_realign256() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign256:
@@ -647,7 +759,9 @@ define void @caller_no_realign256() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign256:
@@ -659,7 +773,9 @@ define void @caller_no_realign256() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 256
   call void @callee(ptr %1)
@@ -681,9 +797,13 @@ define void @caller512() {
 ; RV32I-NEXT:    addi a0, sp, 512
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -1024
+; RV32I-NEXT:    .cfi_def_cfa sp, 1024
 ; RV32I-NEXT:    lw ra, 1020(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 1016(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 1024
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller512:
@@ -700,9 +820,13 @@ define void @caller512() {
 ; RV32I-ILP32E-NEXT:    addi a0, sp, 512
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -1024
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 1024
 ; RV32I-ILP32E-NEXT:    lw ra, 1020(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 1016(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 1024
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller512:
@@ -719,9 +843,13 @@ define void @caller512() {
 ; RV64I-NEXT:    addi a0, sp, 512
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -1024
+; RV64I-NEXT:    .cfi_def_cfa sp, 1024
 ; RV64I-NEXT:    ld ra, 1016(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 1008(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 1024
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller512:
@@ -738,9 +866,13 @@ define void @caller512() {
 ; RV64I-LP64E-NEXT:    addi a0, sp, 512
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -1024
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 1024
 ; RV64I-LP64E-NEXT:    ld ra, 1016(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 1008(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 1024
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 512
   call void @callee(ptr %1)
@@ -757,7 +889,9 @@ define void @caller_no_realign512() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign512:
@@ -769,7 +903,9 @@ define void @caller_no_realign512() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign512:
@@ -781,7 +917,9 @@ define void @caller_no_realign512() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign512:
@@ -793,7 +931,9 @@ define void @caller_no_realign512() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 512
   call void @callee(ptr %1)
@@ -816,9 +956,13 @@ define void @caller1024() {
 ; RV32I-NEXT:    addi a0, sp, 1024
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -2032
+; RV32I-NEXT:    .cfi_def_cfa sp, 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller1024:
@@ -836,9 +980,13 @@ define void @caller1024() {
 ; RV32I-ILP32E-NEXT:    addi a0, sp, 1024
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -2044
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 2044
 ; RV32I-ILP32E-NEXT:    lw ra, 2040(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 2036(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 2044
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller1024:
@@ -856,9 +1004,13 @@ define void @caller1024() {
 ; RV64I-NEXT:    addi a0, sp, 1024
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -2032
+; RV64I-NEXT:    .cfi_def_cfa sp, 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller1024:
@@ -876,9 +1028,13 @@ define void @caller1024() {
 ; RV64I-LP64E-NEXT:    addi a0, sp, 1024
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -2040
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 2040
 ; RV64I-LP64E-NEXT:    ld ra, 2032(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 2024(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 2040
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 1024
   call void @callee(ptr %1)
@@ -895,7 +1051,9 @@ define void @caller_no_realign1024() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign1024:
@@ -907,7 +1065,9 @@ define void @caller_no_realign1024() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign1024:
@@ -919,7 +1079,9 @@ define void @caller_no_realign1024() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign1024:
@@ -931,7 +1093,9 @@ define void @caller_no_realign1024() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 1024
   call void @callee(ptr %1)
@@ -956,9 +1120,13 @@ define void @caller2048() {
 ; RV32I-NEXT:    addi a0, a0, 1
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -2032
+; RV32I-NEXT:    .cfi_def_cfa sp, 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller2048:
@@ -978,9 +1146,13 @@ define void @caller2048() {
 ; RV32I-ILP32E-NEXT:    addi a0, a0, 1
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -2044
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 2044
 ; RV32I-ILP32E-NEXT:    lw ra, 2040(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 2036(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 2044
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller2048:
@@ -1000,9 +1172,13 @@ define void @caller2048() {
 ; RV64I-NEXT:    addi a0, a0, 1
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -2032
+; RV64I-NEXT:    .cfi_def_cfa sp, 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller2048:
@@ -1022,9 +1198,13 @@ define void @caller2048() {
 ; RV64I-LP64E-NEXT:    addi a0, a0, 1
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -2040
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 2040
 ; RV64I-LP64E-NEXT:    ld ra, 2032(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 2024(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 2040
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 2048
   call void @callee(ptr %1)
@@ -1041,7 +1221,9 @@ define void @caller_no_realign2048() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign2048:
@@ -1053,7 +1235,9 @@ define void @caller_no_realign2048() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign2048:
@@ -1065,7 +1249,9 @@ define void @caller_no_realign2048() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign2048:
@@ -1077,7 +1263,9 @@ define void @caller_no_realign2048() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 2048
   call void @callee(ptr %1)
@@ -1104,9 +1292,13 @@ define void @caller4096() {
 ; RV32I-NEXT:    add a0, sp, a0
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    addi sp, s0, -2032
+; RV32I-NEXT:    .cfi_def_cfa sp, 2032
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
+; RV32I-NEXT:    .cfi_restore s0
 ; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller4096:
@@ -1128,9 +1320,13 @@ define void @caller4096() {
 ; RV32I-ILP32E-NEXT:    add a0, sp, a0
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    addi sp, s0, -2044
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa sp, 2044
 ; RV32I-ILP32E-NEXT:    lw ra, 2040(sp) # 4-byte Folded Reload
 ; RV32I-ILP32E-NEXT:    lw s0, 2036(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
+; RV32I-ILP32E-NEXT:    .cfi_restore s0
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 2044
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller4096:
@@ -1152,9 +1348,13 @@ define void @caller4096() {
 ; RV64I-NEXT:    add a0, sp, a0
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    addi sp, s0, -2032
+; RV64I-NEXT:    .cfi_def_cfa sp, 2032
 ; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
+; RV64I-NEXT:    .cfi_restore s0
 ; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller4096:
@@ -1176,9 +1376,13 @@ define void @caller4096() {
 ; RV64I-LP64E-NEXT:    add a0, sp, a0
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    addi sp, s0, -2040
+; RV64I-LP64E-NEXT:    .cfi_def_cfa sp, 2040
 ; RV64I-LP64E-NEXT:    ld ra, 2032(sp) # 8-byte Folded Reload
 ; RV64I-LP64E-NEXT:    ld s0, 2024(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
+; RV64I-LP64E-NEXT:    .cfi_restore s0
 ; RV64I-LP64E-NEXT:    addi sp, sp, 2040
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 4096
   call void @callee(ptr %1)
@@ -1195,7 +1399,9 @@ define void @caller_no_realign4096() "no-realign-stack" {
 ; RV32I-NEXT:    mv a0, sp
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-ILP32E-LABEL: caller_no_realign4096:
@@ -1207,7 +1413,9 @@ define void @caller_no_realign4096() "no-realign-stack" {
 ; RV32I-ILP32E-NEXT:    mv a0, sp
 ; RV32I-ILP32E-NEXT:    call callee
 ; RV32I-ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT:    .cfi_restore ra
 ; RV32I-ILP32E-NEXT:    addi sp, sp, 8
+; RV32I-ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-ILP32E-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign4096:
@@ -1219,7 +1427,9 @@ define void @caller_no_realign4096() "no-realign-stack" {
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-LP64E-LABEL: caller_no_realign4096:
@@ -1231,7 +1441,9 @@ define void @caller_no_realign4096() "no-realign-stack" {
 ; RV64I-LP64E-NEXT:    mv a0, sp
 ; RV64I-LP64E-NEXT:    call callee
 ; RV64I-LP64E-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT:    .cfi_restore ra
 ; RV64I-LP64E-NEXT:    addi sp, sp, 16
+; RV64I-LP64E-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-LP64E-NEXT:    ret
   %1 = alloca i8, align 4096
   call void @callee(ptr %1)
diff --git a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
index 18bb4f5ad0f0c0..37eb0c34a0840a 100644
--- a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
@@ -25,7 +25,10 @@ define i32 @caller(i32 %a) {
 ; ILP32E-NEXT:    mv a0, s0
 ; ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
 ; ILP32E-NEXT:    lw s0, 0(sp) # 4-byte Folded Reload
+; ILP32E-NEXT:    .cfi_restore ra
+; ILP32E-NEXT:    .cfi_restore s0
 ; ILP32E-NEXT:    addi sp, sp, 8
+; ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: caller:
@@ -49,7 +52,11 @@ define i32 @caller(i32 %a) {
 ; ILP32E-WITHFP-NEXT:    lw ra, 8(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 4(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s1, 0(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
+; ILP32E-WITHFP-NEXT:    .cfi_restore s1
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 12
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 entry:
   call void (i32, ...) @va_double(i32 1, double 2.000000e+00)
@@ -82,7 +89,9 @@ define void @va_double(i32 %n, ...) {
 ; ILP32E-NEXT:    bnez a0, .LBB1_2
 ; ILP32E-NEXT:  # %bb.1: # %if.end
 ; ILP32E-NEXT:    lw ra, 4(sp) # 4-byte Folded Reload
+; ILP32E-NEXT:    .cfi_restore ra
 ; ILP32E-NEXT:    addi sp, sp, 32
+; ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-NEXT:    ret
 ; ILP32E-NEXT:  .LBB1_2: # %if.then
 ; ILP32E-NEXT:    call abort
@@ -117,7 +126,10 @@ define void @va_double(i32 %n, ...) {
 ; ILP32E-WITHFP-NEXT:  # %bb.1: # %if.end
 ; ILP32E-WITHFP-NEXT:    lw ra, 8(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 4(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 36
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ; ILP32E-WITHFP-NEXT:  .LBB1_2: # %if.then
 ; ILP32E-WITHFP-NEXT:    call abort
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index d2c30c54390702..151e05c7443335 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -65,6 +65,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a1, sp, 24
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a1, 12(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 48
+; ILP32-ILP32F-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va1:
@@ -89,7 +90,10 @@ define i32 @va1(ptr %fmt, ...) {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a1, -12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    .cfi_restore ra
+; ILP32-ILP32F-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
+; ILP32-ILP32F-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va1:
@@ -107,6 +111,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a1, sp, 24
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a1, 12(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 48
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; ILP32E-FPELIM-LABEL: va1:
@@ -122,6 +127,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; ILP32E-FPELIM-NEXT:    addi a1, sp, 12
 ; ILP32E-FPELIM-NEXT:    sw a1, 0(sp)
 ; ILP32E-FPELIM-NEXT:    addi sp, sp, 28
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: va1:
@@ -144,7 +150,10 @@ define i32 @va1(ptr %fmt, ...) {
 ; ILP32E-WITHFP-NEXT:    sw a1, -12(s0)
 ; ILP32E-WITHFP-NEXT:    lw ra, 8(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 4(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 36
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va1:
@@ -162,6 +171,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va1:
@@ -186,7 +196,10 @@ define i32 @va1(ptr %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_restore ra
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_restore s0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
 ;
 ; LP64E-FPELIM-LABEL: va1:
@@ -202,6 +215,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; LP64E-FPELIM-NEXT:    sd a3, 32(sp)
 ; LP64E-FPELIM-NEXT:    sd a2, 24(sp)
 ; LP64E-FPELIM-NEXT:    addi sp, sp, 56
+; LP64E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; LP64E-FPELIM-NEXT:    ret
 ;
 ; LP64E-WITHFP-LABEL: va1:
@@ -224,7 +238,10 @@ define i32 @va1(ptr %fmt, ...) {
 ; LP64E-WITHFP-NEXT:    sd a2, 16(s0)
 ; LP64E-WITHFP-NEXT:    ld ra, 16(sp) # 8-byte Folded Reload
 ; LP64E-WITHFP-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    .cfi_restore ra
+; LP64E-WITHFP-NEXT:    .cfi_restore s0
 ; LP64E-WITHFP-NEXT:    addi sp, sp, 72
+; LP64E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; LP64E-WITHFP-NEXT:    ret
   %va = alloca ptr
   call void @llvm.va_start(ptr %va)
@@ -2820,6 +2837,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a1, 24414
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a1, a1, 304
 ; ILP32-ILP32F-FPELIM-NEXT:    add sp, sp, a1
+; ILP32-ILP32F-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va_large_stack:
@@ -2850,9 +2868,13 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a1, 24414
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a1, a1, -1728
 ; ILP32-ILP32F-WITHFP-NEXT:    add sp, sp, a1
+; ILP32-ILP32F-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 1996(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 1992(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    .cfi_restore ra
+; ILP32-ILP32F-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 2032
+; ILP32-ILP32F-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va_large_stack:
@@ -2890,6 +2912,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a1, 24414
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a1, a1, 304
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    add sp, sp, a1
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; ILP32E-FPELIM-LABEL: va_large_stack:
@@ -2921,6 +2944,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; ILP32E-FPELIM-NEXT:    lui a1, 24414
 ; ILP32E-FPELIM-NEXT:    addi a1, a1, 288
 ; ILP32E-FPELIM-NEXT:    add sp, sp, a1
+; ILP32E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-FPELIM-NEXT:    ret
 ;
 ; ILP32E-WITHFP-LABEL: va_large_stack:
@@ -2949,9 +2973,13 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; ILP32E-WITHFP-NEXT:    lui a1, 24414
 ; ILP32E-WITHFP-NEXT:    addi a1, a1, -1748
 ; ILP32E-WITHFP-NEXT:    add sp, sp, a1
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 2044
 ; ILP32E-WITHFP-NEXT:    lw ra, 2016(sp) # 4-byte Folded Reload
 ; ILP32E-WITHFP-NEXT:    lw s0, 2012(sp) # 4-byte Folded Reload
+; ILP32E-WITHFP-NEXT:    .cfi_restore ra
+; ILP32E-WITHFP-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-NEXT:    addi sp, sp, 2044
+; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va_large_stack:
@@ -2991,6 +3019,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a1, 24414
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a1, a1, 336
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    add sp, sp, a1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va_large_stack:
@@ -3021,9 +3050,13 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a1, 24414
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a1, a1, -1680
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    add sp, sp, a1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 1960(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 1952(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_restore ra
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_restore s0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 2032
+; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
 ;
 ; LP64E-FPELIM-LABEL: va_large_stack:
@@ -3057,6 +3090,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; LP64E-FPELIM-NEXT:    lui a1, 24414
 ; LP64E-FPELIM-NEXT:    addiw a1, a1, 320
 ; LP64E-FPELIM-NEXT:    add sp, sp, a1
+; LP64E-FPELIM-NEXT:    .cfi_def_cfa_offset 0
 ; LP64E-FPELIM-NEXT:    ret
 ;
 ; LP64E-WITHFP-LABEL: va_large_stack:
@@ -3085,9 +3119,13 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; LP64E-WITHFP-NEXT:    lui a1, 24414
 ; LP64E-WITHFP-NEXT:    addiw a1, a1, -1704
 ; LP64E-WITHFP-NEXT:    add sp, sp, a1
+; LP64E-WITHFP-NEXT:    .cfi_def_cfa_offset 2040
 ; LP64E-WITHFP-NEXT:    ld ra, 1984(sp) # 8-byte Folded Reload
 ; LP64E-WITHFP-NEXT:    ld s0, 1976(sp) # 8-byte Folded Reload
+; LP64E-WITHFP-NEXT:    .cfi_restore ra
+; LP64E-WITHFP-NEXT:    .cfi_restore s0
 ; LP64E-WITHFP-NEXT:    addi sp, sp, 2040
+; LP64E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; LP64E-WITHFP-NEXT:    ret
   %large = alloca [ 100000000 x i8 ]
   %va = alloca ptr
diff --git a/llvm/test/CodeGen/RISCV/vlenb.ll b/llvm/test/CodeGen/RISCV/vlenb.ll
index 26d4f99c3b9792..280df6545fd069 100644
--- a/llvm/test/CodeGen/RISCV/vlenb.ll
+++ b/llvm/test/CodeGen/RISCV/vlenb.ll
@@ -57,7 +57,10 @@ define i32 @sink_to_use_call() {
 ; CHECK-NEXT:    mv a0, s0
 ; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
 ; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %v1 = call i32 @llvm.read_register.i32(metadata !0)
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index b1efe53290e8ee..572b74cc2499f7 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1374,7 +1374,10 @@ define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smulo.i64:
@@ -1445,7 +1448,10 @@ define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32ZBA-NEXT:    mv a0, a1
 ; RV32ZBA-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZBA-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore s0
+; RV32ZBA-NEXT:    .cfi_restore s1
 ; RV32ZBA-NEXT:    addi sp, sp, 16
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: smulo.i64:
@@ -1516,7 +1522,10 @@ define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32ZICOND-NEXT:    mv a0, a1
 ; RV32ZICOND-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32ZICOND-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    .cfi_restore s0
+; RV32ZICOND-NEXT:    .cfi_restore s1
 ; RV32ZICOND-NEXT:    addi sp, sp, 16
+; RV32ZICOND-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: smulo.i64:
@@ -3378,7 +3387,9 @@ define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    mv a1, a3
 ; RV32-NEXT:  .LBB46_2: # %entry
 ; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smulo.select.i64:
@@ -3447,7 +3458,9 @@ define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    mv a1, a3
 ; RV32ZBA-NEXT:  .LBB46_2: # %entry
 ; RV32ZBA-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore s0
 ; RV32ZBA-NEXT:    addi sp, sp, 16
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: smulo.select.i64:
@@ -3517,7 +3530,9 @@ define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZICOND-NEXT:    czero.eqz a1, a1, a4
 ; RV32ZICOND-NEXT:    or a1, a1, a2
 ; RV32ZICOND-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    .cfi_restore s0
 ; RV32ZICOND-NEXT:    addi sp, sp, 16
+; RV32ZICOND-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: smulo.select.i64:
@@ -3589,7 +3604,9 @@ define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    seqz a0, a0
 ; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smulo.not.i64:
@@ -3652,7 +3669,9 @@ define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    or a0, a1, a0
 ; RV32ZBA-NEXT:    seqz a0, a0
 ; RV32ZBA-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore s0
 ; RV32ZBA-NEXT:    addi sp, sp, 16
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: smulo.not.i64:
@@ -3715,7 +3734,9 @@ define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZICOND-NEXT:    or a0, a1, a0
 ; RV32ZICOND-NEXT:    seqz a0, a0
 ; RV32ZICOND-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    .cfi_restore s0
 ; RV32ZICOND-NEXT:    addi sp, sp, 16
+; RV32ZICOND-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: smulo.not.i64:
@@ -4950,7 +4971,9 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:  .LBB61_3: # %overflow
 ; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smulo.br.i64:
@@ -5023,7 +5046,9 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:  .LBB61_3: # %overflow
 ; RV32ZBA-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    .cfi_restore s0
 ; RV32ZBA-NEXT:    addi sp, sp, 16
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: smulo.br.i64:
@@ -5096,7 +5121,9 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZICOND-NEXT:    li a0, 1
 ; RV32ZICOND-NEXT:  .LBB61_3: # %overflow
 ; RV32ZICOND-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    .cfi_restore s0
 ; RV32ZICOND-NEXT:    addi sp, sp, 16
+; RV32ZICOND-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: smulo.br.i64:
diff --git a/llvm/test/CodeGen/RISCV/xcvbi.ll b/llvm/test/CodeGen/RISCV/xcvbi.ll
index afd30faa56f90b..ca2e416e334f01 100644
--- a/llvm/test/CodeGen/RISCV/xcvbi.ll
+++ b/llvm/test/CodeGen/RISCV/xcvbi.ll
@@ -76,6 +76,7 @@ define i32 @select_beqimm_1(i32 %a, i32 %x, i32 %y) {
 ; CHECK_NOPT-NEXT:  .LBB2_2: # %entry
 ; CHECK_NOPT-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK_NOPT-NEXT:    addi sp, sp, 16
+; CHECK_NOPT-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK_NOPT-NEXT:    ret
 ;
 ; CHECK_OPT-LABEL: select_beqimm_1:
@@ -106,6 +107,7 @@ define i32 @select_beqimm_2(i32 %a, i32 %x, i32 %y) {
 ; CHECK_NOPT-NEXT:  .LBB3_2: # %entry
 ; CHECK_NOPT-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK_NOPT-NEXT:    addi sp, sp, 16
+; CHECK_NOPT-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK_NOPT-NEXT:    ret
 ;
 ; CHECK_OPT-LABEL: select_beqimm_2:
@@ -136,6 +138,7 @@ define i32 @select_beqimm_3(i32 %a, i32 %x, i32 %y) {
 ; CHECK_NOPT-NEXT:  .LBB4_2: # %entry
 ; CHECK_NOPT-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK_NOPT-NEXT:    addi sp, sp, 16
+; CHECK_NOPT-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK_NOPT-NEXT:    ret
 ;
 ; CHECK_OPT-LABEL: select_beqimm_3:
@@ -167,6 +170,7 @@ define i32 @select_no_beqimm_1(i32 %a, i32 %x, i32 %y) {
 ; CHECK_NOPT-NEXT:  .LBB5_2: # %entry
 ; CHECK_NOPT-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK_NOPT-NEXT:    addi sp, sp, 16
+; CHECK_NOPT-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK_NOPT-NEXT:    ret
 ;
 ; CHECK_OPT-LABEL: select_no_beqimm_1:
@@ -199,6 +203,7 @@ define i32 @select_no_beqimm_2(i32 %a, i32 %x, i32 %y) {
 ; CHECK_NOPT-NEXT:  .LBB6_2: # %entry
 ; CHECK_NOPT-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK_NOPT-NEXT:    addi sp, sp, 16
+; CHECK_NOPT-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK_NOPT-NEXT:    ret
 ;
 ; CHECK_OPT-LABEL: select_no_beqimm_2:
@@ -230,6 +235,7 @@ define i32 @select_bneimm_1(i32 %a, i32 %x, i32 %y) {
 ; CHECK_NOPT-NEXT:  .LBB7_2: # %entry
 ; CHECK_NOPT-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK_NOPT-NEXT:    addi sp, sp, 16
+; CHECK_NOPT-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK_NOPT-NEXT:    ret
 ;
 ; CHECK_OPT-LABEL: select_bneimm_1:
diff --git a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
index 6d1521c719edd3..c30945a75461af 100644
--- a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
@@ -229,7 +229,12 @@ define i1 @flo(float %c, float %a, float %b) {
 ; CHECK-RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64I-NEXT:    .cfi_restore ra
+; CHECK-RV64I-NEXT:    .cfi_restore s0
+; CHECK-RV64I-NEXT:    .cfi_restore s1
+; CHECK-RV64I-NEXT:    .cfi_restore s2
 ; CHECK-RV64I-NEXT:    addi sp, sp, 32
+; CHECK-RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64I-NEXT:    ret
 ;
 ; CHECK-RV64IF-LABEL: flo:
@@ -275,7 +280,12 @@ define i1 @dlo(double %c, double %a, double %b) {
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
+; CHECK-NEXT:    .cfi_restore s2
 ; CHECK-NEXT:    addi sp, sp, 32
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %l0 = fcmp ult double %a, %c
   %l1 = fcmp ult double %b, %c
diff --git a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
index 73ace203398501..ab72a99570d92d 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
@@ -33,7 +33,13 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
 ; RV32-NEXT:    sb a0, 0(s0)
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    addi sp, sp, 8
-; RV32-NEXT:    cm.popret {ra, s0-s1}, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    cm.pop {ra, s0-s1}, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
+; RV32-NEXT:    ret
 entry:
   br label %while.body
 
diff --git a/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir b/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
index 93931ff950a8c0..b465f5a9567371 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
+++ b/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
@@ -25,7 +25,11 @@ body:                   |
     ; CHECK-ZCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -4
     ; CHECK-ZCMP32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP32-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP32-NEXT: CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP32-NEXT: frame-destroy CM_POP 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-ZCMP32-NEXT: PseudoRET
     ;
     ; CHECK-LIBCALL32-LABEL: name: popret_rvlist5
     ; CHECK-LIBCALL32: liveins: $x1, $x8
@@ -36,6 +40,8 @@ body:                   |
     ; CHECK-LIBCALL32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
     ; CHECK-LIBCALL32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL32-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2
     ;
     ; CHECK-ZCMP64-LABEL: name: popret_rvlist5
@@ -47,7 +53,11 @@ body:                   |
     ; CHECK-ZCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
     ; CHECK-ZCMP64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP64-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP64-NEXT: CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP64-NEXT: frame-destroy CM_POP 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-ZCMP64-NEXT: PseudoRET
     ;
     ; CHECK-LIBCALL64-LABEL: name: popret_rvlist5
     ; CHECK-LIBCALL64: liveins: $x1, $x8
@@ -58,6 +68,8 @@ body:                   |
     ; CHECK-LIBCALL64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -16
     ; CHECK-LIBCALL64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL64-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2
     ;
     ; CHECK-NO-ZCMP32-LABEL: name: popret_rvlist5
@@ -73,7 +85,10 @@ body:                   |
     ; CHECK-NO-ZCMP32-NEXT: $x8 = IMPLICIT_DEF
     ; CHECK-NO-ZCMP32-NEXT: $x1 = LW $x2, 12 :: (load (s32) from %stack.0)
     ; CHECK-NO-ZCMP32-NEXT: $x8 = LW $x2, 8 :: (load (s32) from %stack.1)
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-NO-ZCMP32-NEXT: $x2 = frame-destroy ADDI $x2, 16
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NO-ZCMP32-NEXT: PseudoRET
     ;
     ; CHECK-NO-ZCMP64-LABEL: name: popret_rvlist5
@@ -89,7 +104,10 @@ body:                   |
     ; CHECK-NO-ZCMP64-NEXT: $x8 = IMPLICIT_DEF
     ; CHECK-NO-ZCMP64-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.0)
     ; CHECK-NO-ZCMP64-NEXT: $x8 = LD $x2, 0 :: (load (s64) from %stack.1)
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-NO-ZCMP64-NEXT: $x2 = frame-destroy ADDI $x2, 16
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NO-ZCMP64-NEXT: PseudoRET
     $x1 = IMPLICIT_DEF
     $x8 = IMPLICIT_DEF
@@ -109,7 +127,12 @@ body:                   |
     ; CHECK-ZCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -4
     ; CHECK-ZCMP32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP32-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP32-NEXT: CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP32-NEXT: $x10 = ADDI $x0, 0
+    ; CHECK-ZCMP32-NEXT: frame-destroy CM_POP 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-ZCMP32-NEXT: PseudoRET implicit $x10
     ;
     ; CHECK-LIBCALL32-LABEL: name: popretz_rvlist5
     ; CHECK-LIBCALL32: liveins: $x1, $x8
@@ -121,6 +144,8 @@ body:                   |
     ; CHECK-LIBCALL32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x8 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x10 = ADDI $x0, 0
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL32-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2, implicit $x10
     ;
     ; CHECK-ZCMP64-LABEL: name: popretz_rvlist5
@@ -132,7 +157,12 @@ body:                   |
     ; CHECK-ZCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
     ; CHECK-ZCMP64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP64-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP64-NEXT: CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP64-NEXT: $x10 = ADDI $x0, 0
+    ; CHECK-ZCMP64-NEXT: frame-destroy CM_POP 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-ZCMP64-NEXT: PseudoRET implicit $x10
     ;
     ; CHECK-LIBCALL64-LABEL: name: popretz_rvlist5
     ; CHECK-LIBCALL64: liveins: $x1, $x8
@@ -144,6 +174,8 @@ body:                   |
     ; CHECK-LIBCALL64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x8 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x10 = ADDI $x0, 0
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL64-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2, implicit $x10
     ;
     ; CHECK-NO-ZCMP32-LABEL: name: popretz_rvlist5
@@ -160,7 +192,10 @@ body:                   |
     ; CHECK-NO-ZCMP32-NEXT: $x10 = ADDI $x0, 0
     ; CHECK-NO-ZCMP32-NEXT: $x1 = LW $x2, 12 :: (load (s32) from %stack.0)
     ; CHECK-NO-ZCMP32-NEXT: $x8 = LW $x2, 8 :: (load (s32) from %stack.1)
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-NO-ZCMP32-NEXT: $x2 = frame-destroy ADDI $x2, 16
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NO-ZCMP32-NEXT: PseudoRET implicit $x10
     ;
     ; CHECK-NO-ZCMP64-LABEL: name: popretz_rvlist5
@@ -177,7 +212,10 @@ body:                   |
     ; CHECK-NO-ZCMP64-NEXT: $x10 = ADDI $x0, 0
     ; CHECK-NO-ZCMP64-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.0)
     ; CHECK-NO-ZCMP64-NEXT: $x8 = LD $x2, 0 :: (load (s64) from %stack.1)
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-NO-ZCMP64-NEXT: $x2 = frame-destroy ADDI $x2, 16
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NO-ZCMP64-NEXT: PseudoRET implicit $x10
     $x1 = IMPLICIT_DEF
     $x8 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir b/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir
index db474b51432c74..5c516e9bb4bcc3 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir
+++ b/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir
@@ -48,6 +48,20 @@ body:                   |
     ; CHECK-ZCMP32-NEXT: $x26 = IMPLICIT_DEF
     ; CHECK-ZCMP32-NEXT: $x27 = IMPLICIT_DEF
     ; CHECK-ZCMP32-NEXT: frame-destroy CM_POP 15, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8, implicit-def $x9, implicit-def $x18, implicit-def $x19, implicit-def $x20, implicit-def $x21, implicit-def $x22, implicit-def $x23, implicit-def $x24, implicit-def $x25, implicit-def $x26, implicit-def $x27
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
+    ; CHECK-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-ZCMP32-NEXT: PseudoRET
     ;
     ; CHECK-LIBCALL32-LABEL: name: push_rvlist15
@@ -81,6 +95,19 @@ body:                   |
     ; CHECK-LIBCALL32-NEXT: $x25 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x26 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x27 = IMPLICIT_DEF
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
+    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-LIBCALL32-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_12, implicit $x2
     ;
     ; CHECK-ZCMP64-LABEL: name: push_rvlist15
@@ -115,6 +142,20 @@ body:                   |
     ; CHECK-ZCMP64-NEXT: $x26 = IMPLICIT_DEF
     ; CHECK-ZCMP64-NEXT: $x27 = IMPLICIT_DEF
     ; CHECK-ZCMP64-NEXT: frame-destroy CM_POP 15, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8, implicit-def $x9, implicit-def $x18, implicit-def $x19, implicit-def $x20, implicit-def $x21, implicit-def $x22, implicit-def $x23, implicit-def $x24, implicit-def $x25, implicit-def $x26, implicit-def $x27
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
+    ; CHECK-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-ZCMP64-NEXT: PseudoRET
     ;
     ; CHECK-LIBCALL64-LABEL: name: push_rvlist15
@@ -148,6 +189,19 @@ body:                   |
     ; CHECK-LIBCALL64-NEXT: $x25 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x26 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x27 = IMPLICIT_DEF
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
+    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-LIBCALL64-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_12, implicit $x2
     ;
     ; CHECK-NO-ZCMP32-LABEL: name: push_rvlist15
@@ -207,7 +261,21 @@ body:                   |
     ; CHECK-NO-ZCMP32-NEXT: $x25 = LW $x2, 20 :: (load (s32) from %stack.10)
     ; CHECK-NO-ZCMP32-NEXT: $x26 = LW $x2, 16 :: (load (s32) from %stack.11)
     ; CHECK-NO-ZCMP32-NEXT: $x27 = LW $x2, 12 :: (load (s32) from %stack.12)
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-NO-ZCMP32-NEXT: $x2 = frame-destroy ADDI $x2, 64
+    ; CHECK-NO-ZCMP32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NO-ZCMP32-NEXT: PseudoRET
     ;
     ; CHECK-NO-ZCMP64-LABEL: name: push_rvlist15
@@ -267,7 +335,21 @@ body:                   |
     ; CHECK-NO-ZCMP64-NEXT: $x25 = LD $x2, 24 :: (load (s64) from %stack.10)
     ; CHECK-NO-ZCMP64-NEXT: $x26 = LD $x2, 16 :: (load (s64) from %stack.11)
     ; CHECK-NO-ZCMP64-NEXT: $x27 = LD $x2, 8 :: (load (s64) from %stack.12)
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-NO-ZCMP64-NEXT: $x2 = frame-destroy ADDI $x2, 112
+    ; CHECK-NO-ZCMP64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
     ; CHECK-NO-ZCMP64-NEXT: PseudoRET
     $x1 = IMPLICIT_DEF
     $x8 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.mir b/llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.mir
index 64556ec0b343af..6e5ac31f192dea 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.mir
+++ b/llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.mir
@@ -99,6 +99,7 @@ body:             |
   ; CHECK-NEXT:   SB killed renamable $x12, $x2, 14 :: (store (s8) into %stack.0 + 14)
   ; CHECK-NEXT:   SB killed renamable $x13, $x2, 13 :: (store (s8) into %stack.0 + 13)
   ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 32
+  ; CHECK-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.exit:
   ; CHECK-NEXT:   PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
index 3eb91eb9a09c47..712e49b08911b8 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
@@ -17,7 +17,11 @@ define float @foo(float %arg) {
 ; RV32-NEXT:    call callee
 ; RV32-NEXT:    fmv.s fa0, fs0
 ; RV32-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    cm.popret {ra}, 32
+; RV32-NEXT:    cm.pop {ra}, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore fs0
+; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: foo:
 ; RV64:       # %bb.0: # %entry
@@ -30,7 +34,11 @@ define float @foo(float %arg) {
 ; RV64-NEXT:    call callee
 ; RV64-NEXT:    fmv.s fa0, fs0
 ; RV64-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
-; RV64-NEXT:    cm.popret {ra}, 32
+; RV64-NEXT:    cm.pop {ra}, 32
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore fs0
+; RV64-NEXT:    ret
 entry:
   call void @callee()
   ret float %arg
@@ -52,6 +60,10 @@ define void @foo2(i32 %x, float %y) {
 ; RV32-NEXT:    fmv.s fa0, fs0
 ; RV32-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    cm.pop {ra, s0}, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore fs0
 ; RV32-NEXT:    tail func
 ;
 ; RV64-LABEL: foo2:
@@ -69,6 +81,10 @@ define void @foo2(i32 %x, float %y) {
 ; RV64-NEXT:    fmv.s fa0, fs0
 ; RV64-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV64-NEXT:    cm.pop {ra, s0}, 32
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore fs0
 ; RV64-NEXT:    tail func
 entry:
   tail call void @bar()
diff --git a/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
index 8596a65c378c6c..caebdab2c95ab1 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
+++ b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
@@ -38,6 +38,7 @@
   ; CHECK-NEXT:    lw a7, 2036(sp) # 4-byte Folded Reload
   ; CHECK-NEXT:    addi sp, sp, 2032
   ; CHECK-NEXT:    addi sp, sp, 32
+  ; CHECK-NEXT:    .cfi_def_cfa_offset 0
   ; CHECK-NEXT:    ret
     ret void
   }
diff --git a/llvm/test/DebugInfo/RISCV/relax-debug-frame.ll b/llvm/test/DebugInfo/RISCV/relax-debug-frame.ll
index ffef0ec2340684..8ab1a67eb1b384 100644
--- a/llvm/test/DebugInfo/RISCV/relax-debug-frame.ll
+++ b/llvm/test/DebugInfo/RISCV/relax-debug-frame.ll
@@ -4,18 +4,22 @@
 ; RUN:     | FileCheck -check-prefix=RELAX-DWARFDUMP %s
 ;
 ; RELAX:      Section ({{.*}}) .rela.eh_frame {
-; RELAX-NEXT:   0x1C R_RISCV_32_PCREL .L0  0x0
-; RELAX-NEXT:   0x30 R_RISCV_32_PCREL .L0  0x0
-; RELAX-NEXT:   0x44 R_RISCV_32_PCREL .L0  0x0
-; RELAX-NEXT:   0x48 R_RISCV_ADD32 .L0  0x0
-; RELAX-NEXT:   0x48 R_RISCV_SUB32 .L0  0x0
-; RELAX-NEXT:  }
+; RELAX-NEXT:   0x1C R_RISCV_32_PCREL .L0  0x0
+; RELAX-NEXT:   0x30 R_RISCV_32_PCREL .L0  0x0
+; RELAX-NEXT:   0x48 R_RISCV_32_PCREL .L0  0x0
+; RELAX-NEXT:   0x4C R_RISCV_ADD32 .L0  0x0
+; RELAX-NEXT:   0x4C R_RISCV_SUB32 .L0  0x0
+; RELAX-NEXT:   0x57 R_RISCV_SET6 .L0  0x0
+; RELAX-NEXT:  }
 
 ; RELAX-DWARFDUMP-NOT: error: failed to compute relocation
 ; RELAX-DWARFDUMP:      FDE
 ; RELAX-DWARFDUMP-NEXT: Format:
 ; RELAX-DWARFDUMP:      DW_CFA_advance_loc: 4
 ; RELAX-DWARFDUMP-NEXT: DW_CFA_def_cfa_offset: +16
+; RELAX-DWARFDUMP-NEXT: DW_CFA_advance_loc: 8
+; RELAX-DWARFDUMP-NEXT: DW_CFA_def_cfa_offset: +0
+; RELAX-DWARFDUMP-NEXT: DW_CFA_nop:
 ; RELAX-DWARFDUMP-EMPTY:
 
 ; RELAX-DWARFDUMP:      FDE
@@ -24,7 +28,10 @@
 ; RELAX-DWARFDUMP-NEXT: DW_CFA_def_cfa_offset: +16
 ; RELAX-DWARFDUMP-NEXT: DW_CFA_advance_loc: 4
 ; RELAX-DWARFDUMP-NEXT: DW_CFA_offset: X1 -4
-; RELAX-DWARFDUMP-NEXT: DW_CFA_nop
+; RELAX-DWARFDUMP-NEXT: DW_CFA_advance_loc: 28
+; RELAX-DWARFDUMP-NEXT: DW_CFA_restore: X1
+; RELAX-DWARFDUMP-NEXT: DW_CFA_advance_loc: 4
+; RELAX-DWARFDUMP-NEXT: DW_CFA_def_cfa_offset: +0
 ; RELAX-DWARFDUMP-EMPTY:
 source_filename = "frame.c"
 

>From aa47ba22404019deec7f916a8eade1dfbc28e933 Mon Sep 17 00:00:00 2001
From: Daniil Avdeev <daniil.avdeev at syntacore.com>
Date: Mon, 30 Sep 2024 15:01:02 +0000
Subject: [PATCH 3/7] [RISCV][CFI] enable CFIFixup pass in RISCVTargetMachine
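
This only flips a TargetOptions flag; TargetPassConfig then schedules
the target-independent "cfi-fixup" pass ("Insert CFI remember/restore
state instructions") late in the codegen pipeline. A minimal sketch of
the hook being used, assuming the upstream TargetMachine interface in
llvm/include/llvm/Target/TargetMachine.h:

  // Targets opt in to the generic CFIFixup pass by setting
  // TargetOptions::EnableCFIFixup; the pass itself stays target-neutral.
  void setCFIFixup(bool Enable) { Options.EnableCFIFixup = Enable; }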

---
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index b9d35a924669f1..4e29b43c9949e3 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -170,6 +170,8 @@ RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
 
   if (TT.isOSFuchsia() && !TT.isArch64Bit())
     report_fatal_error("Fuchsia is only supported for 64-bit");
+
+  setCFIFixup(true);
 }
 
 const RISCVSubtarget *

>From 3a743271b5567caaac82dd7bfabb4ed2148da2ec Mon Sep 17 00:00:00 2001
From: Daniil Avdeev <daniil.avdeev at syntacore.com>
Date: Wed, 2 Oct 2024 12:41:48 +0300
Subject: [PATCH 4/7] [RISCV][CFI] update tests for CFIFixup
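
Epilogue CFI is only valid on the path that actually executes the
epilogue. When a function leaves from the middle of the layout (early
return, tail call, landing pad), blocks placed after an epilogue would
otherwise inherit a CFA offset of 0 and restored callee-saved
registers. CFIFixup therefore remembers the post-prologue unwind state
once and re-establishes it at each block entered with the frame still
set up, which is what the new .cfi_remember_state/.cfi_restore_state
pairs in these checks reflect. A rough, hypothetical sketch of how such
directives are built, assuming the MC API in
llvm/include/llvm/MC/MCDwarf.h (an illustration, not the pass's actual
code):

  // Record the unwind state right after the prologue ...
  unsigned Remember =
      MF.addFrameInst(MCCFIInstruction::createRememberState(nullptr));
  // ... and bring it back at a block reached with the frame intact.
  unsigned Restore =
      MF.addFrameInst(MCCFIInstruction::createRestoreState(nullptr));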

---
 llvm/test/CodeGen/RISCV/O0-pipeline.ll        |  1 +
 llvm/test/CodeGen/RISCV/O3-pipeline.ll        |  1 +
 llvm/test/CodeGen/RISCV/branch-relaxation.ll  |  4 +++
 .../RISCV/exception-pointer-register.ll       |  4 +++
 llvm/test/CodeGen/RISCV/lpad.ll               |  8 ++++++
 llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll |  2 ++
 .../RISCV/rvv/fixed-vectors-masked-scatter.ll | 26 +++++++++++++++++++
 .../CodeGen/RISCV/rvv/fpclamptosat_vec.ll     | 24 +++++++++++++++++
 llvm/test/CodeGen/RISCV/vararg-ilp32e.ll      |  4 +++
 9 files changed, 74 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/O0-pipeline.ll b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
index 7b85e9c77a4232..f60def9d546f81 100644
--- a/llvm/test/CodeGen/RISCV/O0-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
@@ -68,6 +68,7 @@
 ; CHECK-NEXT:       StackMap Liveness Analysis
 ; CHECK-NEXT:       Live DEBUG_VALUE analysis
 ; CHECK-NEXT:       Machine Sanitizer Binary Metadata
+; CHECK-NEXT:       Insert CFI remember/restore state instructions
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       Machine Optimization Remark Emitter
 ; CHECK-NEXT:       Stack Frame Layout Analysis
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index c29f15a15c1503..e68f32bb6f73f9 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -196,6 +196,7 @@
 ; CHECK-NEXT:       Machine Sanitizer Binary Metadata
 ; CHECK-NEXT:     Machine Outliner
 ; CHECK-NEXT:     FunctionPass Manager
+; CHECK-NEXT:       Insert CFI remember/restore state instructions
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       Machine Optimization Remark Emitter
 ; CHECK-NEXT:       Stack Frame Layout Analysis
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
index 8da0ec7a5f2d74..b1efeaa2be2d99 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
@@ -2789,6 +2789,7 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV32-NEXT:    .cfi_offset s9, -44
 ; CHECK-RV32-NEXT:    .cfi_offset s10, -48
 ; CHECK-RV32-NEXT:    .cfi_offset s11, -52
+; CHECK-RV32-NEXT:    .cfi_remember_state
 ; CHECK-RV32-NEXT:    #APP
 ; CHECK-RV32-NEXT:    li ra, 1
 ; CHECK-RV32-NEXT:    #NO_APP
@@ -3006,6 +3007,7 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV32-NEXT:    ret
 ; CHECK-RV32-NEXT:  .LBB6_5: # %cond_3
+; CHECK-RV32-NEXT:    .cfi_restore_state
 ; CHECK-RV32-NEXT:    beq t1, t2, .LBB6_4
 ; CHECK-RV32-NEXT:  # %bb.6: # %space
 ; CHECK-RV32-NEXT:    #APP
@@ -3045,6 +3047,7 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV64-NEXT:    .cfi_offset s9, -88
 ; CHECK-RV64-NEXT:    .cfi_offset s10, -96
 ; CHECK-RV64-NEXT:    .cfi_offset s11, -104
+; CHECK-RV64-NEXT:    .cfi_remember_state
 ; CHECK-RV64-NEXT:    #APP
 ; CHECK-RV64-NEXT:    li ra, 1
 ; CHECK-RV64-NEXT:    #NO_APP
@@ -3267,6 +3270,7 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
 ; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-RV64-NEXT:    ret
 ; CHECK-RV64-NEXT:  .LBB6_5: # %cond_3
+; CHECK-RV64-NEXT:    .cfi_restore_state
 ; CHECK-RV64-NEXT:    sext.w t5, t2
 ; CHECK-RV64-NEXT:    sext.w t6, t1
 ; CHECK-RV64-NEXT:    beq t6, t5, .LBB6_4
diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
index 851af342b391d8..c46ce8801702be 100644
--- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
+++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
@@ -23,6 +23,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    .cfi_offset s0, -8
 ; RV32I-NEXT:    .cfi_offset s1, -12
+; RV32I-NEXT:    .cfi_remember_state
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    beqz a0, .LBB0_2
 ; RV32I-NEXT:  # %bb.1: # %bb2
@@ -47,6 +48,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_4: # %lpad
+; RV32I-NEXT:    .cfi_restore_state
 ; RV32I-NEXT:  .Ltmp4:
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
@@ -64,6 +66,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    .cfi_offset s0, -16
 ; RV64I-NEXT:    .cfi_offset s1, -24
+; RV64I-NEXT:    .cfi_remember_state
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    beqz a0, .LBB0_2
 ; RV64I-NEXT:  # %bb.1: # %bb2
@@ -88,6 +91,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
 ; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB0_4: # %lpad
+; RV64I-NEXT:    .cfi_restore_state
 ; RV64I-NEXT:  .Ltmp4:
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
diff --git a/llvm/test/CodeGen/RISCV/lpad.ll b/llvm/test/CodeGen/RISCV/lpad.ll
index 87e76ba0f5e5ae..f5d06f0924c542 100644
--- a/llvm/test/CodeGen/RISCV/lpad.ll
+++ b/llvm/test/CodeGen/RISCV/lpad.ll
@@ -144,6 +144,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_remember_state
 ; RV32-NEXT:  .Ltmp0:
 ; RV32-NEXT:    jalr a0
 ; RV32-NEXT:  .Ltmp1:
@@ -154,6 +155,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB2_2: # %lpad
+; RV32-NEXT:    .cfi_restore_state
 ; RV32-NEXT:  .Ltmp2:
 ; RV32-NEXT:    j .LBB2_1
 ;
@@ -164,6 +166,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_remember_state
 ; RV64-NEXT:  .Ltmp0:
 ; RV64-NEXT:    jalr a0
 ; RV64-NEXT:  .Ltmp1:
@@ -174,6 +177,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB2_2: # %lpad
+; RV64-NEXT:    .cfi_restore_state
 ; RV64-NEXT:  .Ltmp2:
 ; RV64-NEXT:    j .LBB2_1
 ;
@@ -184,6 +188,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; FIXED-ONE-RV32-NEXT:    .cfi_def_cfa_offset 16
 ; FIXED-ONE-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; FIXED-ONE-RV32-NEXT:    .cfi_offset ra, -4
+; FIXED-ONE-RV32-NEXT:    .cfi_remember_state
 ; FIXED-ONE-RV32-NEXT:  .Ltmp0:
 ; FIXED-ONE-RV32-NEXT:    lui t2, 1
 ; FIXED-ONE-RV32-NEXT:    jalr a0
@@ -195,6 +200,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; FIXED-ONE-RV32-NEXT:    .cfi_def_cfa_offset 0
 ; FIXED-ONE-RV32-NEXT:    ret
 ; FIXED-ONE-RV32-NEXT:  .LBB2_2: # %lpad
+; FIXED-ONE-RV32-NEXT:    .cfi_restore_state
 ; FIXED-ONE-RV32-NEXT:  .Ltmp2:
 ; FIXED-ONE-RV32-NEXT:    j .LBB2_1
 ;
@@ -205,6 +211,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; FIXED-ONE-RV64-NEXT:    .cfi_def_cfa_offset 16
 ; FIXED-ONE-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; FIXED-ONE-RV64-NEXT:    .cfi_offset ra, -8
+; FIXED-ONE-RV64-NEXT:    .cfi_remember_state
 ; FIXED-ONE-RV64-NEXT:  .Ltmp0:
 ; FIXED-ONE-RV64-NEXT:    lui t2, 1
 ; FIXED-ONE-RV64-NEXT:    jalr a0
@@ -216,6 +223,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
 ; FIXED-ONE-RV64-NEXT:    .cfi_def_cfa_offset 0
 ; FIXED-ONE-RV64-NEXT:    ret
 ; FIXED-ONE-RV64-NEXT:  .LBB2_2: # %lpad
+; FIXED-ONE-RV64-NEXT:    .cfi_restore_state
 ; FIXED-ONE-RV64-NEXT:  .Ltmp2:
 ; FIXED-ONE-RV64-NEXT:    j .LBB2_1
 entry:
diff --git a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
index 71fad9180d304f..a5847365159a84 100644
--- a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
+++ b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
@@ -24,6 +24,7 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:    .cfi_offset s1, -24
 ; CHECK-NEXT:    addi s0, sp, 32
 ; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    .cfi_remember_state
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    addi sp, sp, -32
 ; CHECK-NEXT:    li a0, 0
@@ -60,6 +61,7 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_4: # %ehcleanup
+; CHECK-NEXT:    .cfi_restore_state
 ; CHECK-NEXT:    call _Unwind_Resume
 entry:
   invoke void @_Z3fooiiiiiiiiiiPi(i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 poison, i32 poison, i32 poison)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index adf49375cba3ea..d66e6957a82183 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -3156,6 +3156,7 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a1, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a2, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a5, 40(a0)
@@ -3212,6 +3213,7 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB41_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw s1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -3280,6 +3282,7 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
 ; RV64ZVE32F-NEXT:    .cfi_offset s0, -8
 ; RV64ZVE32F-NEXT:    .cfi_offset s1, -16
 ; RV64ZVE32F-NEXT:    .cfi_offset s2, -24
+; RV64ZVE32F-NEXT:    .cfi_remember_state
 ; RV64ZVE32F-NEXT:    ld a4, 40(a1)
 ; RV64ZVE32F-NEXT:    ld a3, 48(a1)
 ; RV64ZVE32F-NEXT:    ld a2, 56(a1)
@@ -3332,6 +3335,7 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
 ; RV64ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB41_10: # %cond.store
+; RV64ZVE32F-NEXT:    .cfi_restore_state
 ; RV64ZVE32F-NEXT:    ld a1, 0(a1)
 ; RV64ZVE32F-NEXT:    ld a0, 0(a0)
 ; RV64ZVE32F-NEXT:    sd a0, 0(a1)
@@ -3394,6 +3398,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -3455,6 +3460,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB42_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -3642,6 +3648,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -3703,6 +3710,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB43_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -3892,6 +3900,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -3953,6 +3962,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB44_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -4149,6 +4159,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -4210,6 +4221,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB45_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -4398,6 +4410,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -4459,6 +4472,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB46_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -4649,6 +4663,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -4710,6 +4725,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB47_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -4908,6 +4924,7 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -4968,6 +4985,7 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB48_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -5155,6 +5173,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -5215,6 +5234,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB49_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -5403,6 +5423,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_offset s0, -4
 ; RV32ZVE32F-NEXT:    .cfi_offset s1, -8
 ; RV32ZVE32F-NEXT:    .cfi_offset s2, -12
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a2, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 40(a0)
@@ -5463,6 +5484,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB50_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -5671,6 +5693,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
 ; RV32ZVE32F-NEXT:    .cfi_offset s6, -28
 ; RV32ZVE32F-NEXT:    .cfi_offset s7, -32
 ; RV32ZVE32F-NEXT:    .cfi_offset s8, -36
+; RV32ZVE32F-NEXT:    .cfi_remember_state
 ; RV32ZVE32F-NEXT:    lw a3, 56(a0)
 ; RV32ZVE32F-NEXT:    lw a4, 60(a0)
 ; RV32ZVE32F-NEXT:    lw a7, 40(a0)
@@ -5759,6 +5782,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
 ; RV32ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB51_10: # %cond.store
+; RV32ZVE32F-NEXT:    .cfi_restore_state
 ; RV32ZVE32F-NEXT:    lw a1, 4(a0)
 ; RV32ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV32ZVE32F-NEXT:    vmv.x.s s2, v8
@@ -5828,6 +5852,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
 ; RV64ZVE32F-NEXT:    .cfi_offset s1, -16
 ; RV64ZVE32F-NEXT:    .cfi_offset s2, -24
 ; RV64ZVE32F-NEXT:    .cfi_offset s3, -32
+; RV64ZVE32F-NEXT:    .cfi_remember_state
 ; RV64ZVE32F-NEXT:    ld a5, 40(a0)
 ; RV64ZVE32F-NEXT:    ld a4, 48(a0)
 ; RV64ZVE32F-NEXT:    ld a3, 56(a0)
@@ -5884,6 +5909,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
 ; RV64ZVE32F-NEXT:    .cfi_def_cfa_offset 0
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB51_10: # %cond.store
+; RV64ZVE32F-NEXT:    .cfi_restore_state
 ; RV64ZVE32F-NEXT:    ld a2, 0(a2)
 ; RV64ZVE32F-NEXT:    ld a0, 0(a0)
 ; RV64ZVE32F-NEXT:    slli a2, a2, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index 6cf449434a22b1..9b4cc2fa901068 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -340,6 +340,7 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
 ; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
 ; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 0(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 8(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 16(a1)
@@ -404,6 +405,7 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB6_10: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a4
 ; CHECK-NOV-NEXT:    fcvt.l.s a2, fs1, rtz
 ; CHECK-NOV-NEXT:    blt s1, a4, .LBB6_2
@@ -538,6 +540,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
 ; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
 ; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 0(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 8(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 16(a1)
@@ -594,6 +597,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB7_6: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a1
 ; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs1, rtz
 ; CHECK-NOV-NEXT:    bltu s1, a1, .LBB7_2
@@ -714,6 +718,7 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
 ; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
 ; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 0(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 8(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 16(a1)
@@ -782,6 +787,7 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB8_6: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a2
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz
 ; CHECK-NOV-NEXT:    blt s1, a2, .LBB8_2
@@ -1241,6 +1247,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
 ; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
 ; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 32(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 40(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 48(a1)
@@ -1362,6 +1369,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB15_18: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a7
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
 ; CHECK-NOV-NEXT:    blt s1, a7, .LBB15_2
@@ -1623,6 +1631,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
 ; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
 ; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 32(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 40(a1)
 ; CHECK-NOV-NEXT:    lhu s3, 48(a1)
@@ -1727,6 +1736,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB16_10: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a1
 ; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs5, rtz
 ; CHECK-NOV-NEXT:    bltu s1, a1, .LBB16_2
@@ -1961,6 +1971,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
 ; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
 ; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 32(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 40(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 48(a1)
@@ -2089,6 +2100,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB17_10: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a3
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
 ; CHECK-NOV-NEXT:    blt s1, a3, .LBB17_2
@@ -3885,6 +3897,7 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
 ; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
 ; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 0(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 8(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 16(a1)
@@ -3949,6 +3962,7 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB33_10: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a4
 ; CHECK-NOV-NEXT:    fcvt.l.s a2, fs1, rtz
 ; CHECK-NOV-NEXT:    blt s1, a4, .LBB33_2
@@ -4081,6 +4095,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
 ; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
 ; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 0(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 8(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 16(a1)
@@ -4137,6 +4152,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB34_6: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a1
 ; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs1, rtz
 ; CHECK-NOV-NEXT:    bltu s1, a1, .LBB34_2
@@ -4256,6 +4272,7 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
 ; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
 ; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 0(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 8(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 16(a1)
@@ -4324,6 +4341,7 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB35_6: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a2
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz
 ; CHECK-NOV-NEXT:    blt s1, a2, .LBB35_2
@@ -4771,6 +4789,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
 ; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
 ; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 32(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 40(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 48(a1)
@@ -4892,6 +4911,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB42_18: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a7
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
 ; CHECK-NOV-NEXT:    blt s1, a7, .LBB42_2
@@ -5151,6 +5171,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
 ; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
 ; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 32(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 40(a1)
 ; CHECK-NOV-NEXT:    lhu s3, 48(a1)
@@ -5255,6 +5276,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB43_10: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a1
 ; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs5, rtz
 ; CHECK-NOV-NEXT:    bltu s1, a1, .LBB43_2
@@ -5488,6 +5510,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
 ; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
 ; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
+; CHECK-NOV-NEXT:    .cfi_remember_state
 ; CHECK-NOV-NEXT:    lhu s1, 32(a1)
 ; CHECK-NOV-NEXT:    lhu s2, 40(a1)
 ; CHECK-NOV-NEXT:    lhu a2, 48(a1)
@@ -5616,6 +5639,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NOV-NEXT:    ret
 ; CHECK-NOV-NEXT:  .LBB44_10: # %entry
+; CHECK-NOV-NEXT:    .cfi_restore_state
 ; CHECK-NOV-NEXT:    mv a0, a3
 ; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
 ; CHECK-NOV-NEXT:    blt s1, a3, .LBB44_2
diff --git a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
index 37eb0c34a0840a..f69c636a9a183f 100644
--- a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll
@@ -70,6 +70,7 @@ define void @va_double(i32 %n, ...) {
 ; ILP32E-NEXT:    .cfi_def_cfa_offset 32
 ; ILP32E-NEXT:    sw ra, 4(sp) # 4-byte Folded Spill
 ; ILP32E-NEXT:    .cfi_offset ra, -28
+; ILP32E-NEXT:    .cfi_remember_state
 ; ILP32E-NEXT:    sw a5, 28(sp)
 ; ILP32E-NEXT:    sw a4, 24(sp)
 ; ILP32E-NEXT:    sw a3, 20(sp)
@@ -94,6 +95,7 @@ define void @va_double(i32 %n, ...) {
 ; ILP32E-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-NEXT:    ret
 ; ILP32E-NEXT:  .LBB1_2: # %if.then
+; ILP32E-NEXT:    .cfi_restore_state
 ; ILP32E-NEXT:    call abort
 ;
 ; ILP32E-WITHFP-LABEL: va_double:
@@ -106,6 +108,7 @@ define void @va_double(i32 %n, ...) {
 ; ILP32E-WITHFP-NEXT:    .cfi_offset s0, -32
 ; ILP32E-WITHFP-NEXT:    addi s0, sp, 12
 ; ILP32E-WITHFP-NEXT:    .cfi_def_cfa s0, 24
+; ILP32E-WITHFP-NEXT:    .cfi_remember_state
 ; ILP32E-WITHFP-NEXT:    sw a5, 20(s0)
 ; ILP32E-WITHFP-NEXT:    sw a4, 16(s0)
 ; ILP32E-WITHFP-NEXT:    sw a3, 12(s0)
@@ -132,6 +135,7 @@ define void @va_double(i32 %n, ...) {
 ; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ; ILP32E-WITHFP-NEXT:  .LBB1_2: # %if.then
+; ILP32E-WITHFP-NEXT:    .cfi_restore_state
 ; ILP32E-WITHFP-NEXT:    call abort
 entry:
   %args = alloca ptr, align 4

>From 19d29c9671f033084b2d7c01bcba7cdba581b6c9 Mon Sep 17 00:00:00 2001
From: Daniil Avdeev <daniil.avdeev at syntacore.com>
Date: Mon, 30 Sep 2024 22:47:44 +0300
Subject: [PATCH 5/7] [RISCV][CFI] add branch_and_tail_call test for CFIFixup
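
The new RUN lines pass --enable-shrink-wrap=false so the prologue stays
in the entry block and both exits (the early tail call and the
fall-through return) tear the frame down in separate blocks. The
DISABLESW prefixes pin down where CFIFixup places .cfi_remember_state
and .cfi_restore_state in that configuration.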

---
 llvm/test/CodeGen/RISCV/frame-info.ll | 269 ++++++++++++++++++++++++++
 1 file changed, 269 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll
index 279904dda5afe2..f89b0f908a1cbb 100644
--- a/llvm/test/CodeGen/RISCV/frame-info.ll
+++ b/llvm/test/CodeGen/RISCV/frame-info.ll
@@ -8,6 +8,15 @@
 ; RUN: llc -mtriple=riscv64 -frame-pointer=all -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64-WITHFP %s
 
+; RUN: llc -mtriple=riscv32 --enable-shrink-wrap=false < %s \
+; RUN:   | FileCheck -check-prefix=RV32-DISABLESW %s
+; RUN: llc -mtriple=riscv64 --enable-shrink-wrap=false < %s \
+; RUN:   | FileCheck -check-prefix=RV64-DISABLESW %s
+; RUN: llc -mtriple=riscv32 -frame-pointer=all --enable-shrink-wrap=false -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32-WITHFP-DISABLESW %s
+; RUN: llc -mtriple=riscv64 -frame-pointer=all --enable-shrink-wrap=false -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64-WITHFP-DISABLESW %s
+
 define void @trivial() {
 ; RV32-LABEL: trivial:
 ; RV32:       # %bb.0:
@@ -52,6 +61,50 @@ define void @trivial() {
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
+;
+; RV32-DISABLESW-LABEL: trivial:
+; RV32-DISABLESW:       # %bb.0:
+; RV32-DISABLESW-NEXT:    ret
+;
+; RV64-DISABLESW-LABEL: trivial:
+; RV64-DISABLESW:       # %bb.0:
+; RV64-DISABLESW-NEXT:    ret
+;
+; RV32-WITHFP-DISABLESW-LABEL: trivial:
+; RV32-WITHFP-DISABLESW:       # %bb.0:
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, sp, -16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV32-WITHFP-DISABLESW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-DISABLESW-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_offset ra, -4
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_offset s0, -8
+; RV32-WITHFP-DISABLESW-NEXT:    addi s0, sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV32-WITHFP-DISABLESW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV32-WITHFP-DISABLESW-NEXT:    ret
+;
+; RV64-WITHFP-DISABLESW-LABEL: trivial:
+; RV64-WITHFP-DISABLESW:       # %bb.0:
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, sp, -16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV64-WITHFP-DISABLESW-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-DISABLESW-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_offset ra, -8
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_offset s0, -16
+; RV64-WITHFP-DISABLESW-NEXT:    addi s0, sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV64-WITHFP-DISABLESW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV64-WITHFP-DISABLESW-NEXT:    ret
   ret void
 }
 
@@ -159,6 +212,110 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
+;
+; RV32-DISABLESW-LABEL: stack_alloc:
+; RV32-DISABLESW:       # %bb.0: # %entry
+; RV32-DISABLESW-NEXT:    addi sp, sp, -16
+; RV32-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV32-DISABLESW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-DISABLESW-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-DISABLESW-NEXT:    .cfi_offset ra, -4
+; RV32-DISABLESW-NEXT:    .cfi_offset s0, -8
+; RV32-DISABLESW-NEXT:    addi s0, sp, 16
+; RV32-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV32-DISABLESW-NEXT:    addi a0, a0, 15
+; RV32-DISABLESW-NEXT:    andi a0, a0, -16
+; RV32-DISABLESW-NEXT:    sub a0, sp, a0
+; RV32-DISABLESW-NEXT:    mv sp, a0
+; RV32-DISABLESW-NEXT:    call callee_with_args
+; RV32-DISABLESW-NEXT:    addi sp, s0, -16
+; RV32-DISABLESW-NEXT:    .cfi_def_cfa sp, 16
+; RV32-DISABLESW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-DISABLESW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-DISABLESW-NEXT:    .cfi_restore ra
+; RV32-DISABLESW-NEXT:    .cfi_restore s0
+; RV32-DISABLESW-NEXT:    addi sp, sp, 16
+; RV32-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV32-DISABLESW-NEXT:    ret
+;
+; RV64-DISABLESW-LABEL: stack_alloc:
+; RV64-DISABLESW:       # %bb.0: # %entry
+; RV64-DISABLESW-NEXT:    addi sp, sp, -16
+; RV64-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV64-DISABLESW-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-DISABLESW-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-DISABLESW-NEXT:    .cfi_offset ra, -8
+; RV64-DISABLESW-NEXT:    .cfi_offset s0, -16
+; RV64-DISABLESW-NEXT:    addi s0, sp, 16
+; RV64-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV64-DISABLESW-NEXT:    slli a0, a0, 32
+; RV64-DISABLESW-NEXT:    srli a0, a0, 32
+; RV64-DISABLESW-NEXT:    addi a0, a0, 15
+; RV64-DISABLESW-NEXT:    andi a0, a0, -16
+; RV64-DISABLESW-NEXT:    sub a0, sp, a0
+; RV64-DISABLESW-NEXT:    mv sp, a0
+; RV64-DISABLESW-NEXT:    call callee_with_args
+; RV64-DISABLESW-NEXT:    addi sp, s0, -16
+; RV64-DISABLESW-NEXT:    .cfi_def_cfa sp, 16
+; RV64-DISABLESW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-DISABLESW-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-DISABLESW-NEXT:    .cfi_restore ra
+; RV64-DISABLESW-NEXT:    .cfi_restore s0
+; RV64-DISABLESW-NEXT:    addi sp, sp, 16
+; RV64-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV64-DISABLESW-NEXT:    ret
+;
+; RV32-WITHFP-DISABLESW-LABEL: stack_alloc:
+; RV32-WITHFP-DISABLESW:       # %bb.0: # %entry
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, sp, -16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV32-WITHFP-DISABLESW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-DISABLESW-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_offset ra, -4
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_offset s0, -8
+; RV32-WITHFP-DISABLESW-NEXT:    addi s0, sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV32-WITHFP-DISABLESW-NEXT:    addi a0, a0, 15
+; RV32-WITHFP-DISABLESW-NEXT:    andi a0, a0, -16
+; RV32-WITHFP-DISABLESW-NEXT:    sub a0, sp, a0
+; RV32-WITHFP-DISABLESW-NEXT:    mv sp, a0
+; RV32-WITHFP-DISABLESW-NEXT:    call callee_with_args
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, s0, -16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV32-WITHFP-DISABLESW-NEXT:    ret
+;
+; RV64-WITHFP-DISABLESW-LABEL: stack_alloc:
+; RV64-WITHFP-DISABLESW:       # %bb.0: # %entry
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, sp, -16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV64-WITHFP-DISABLESW-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-DISABLESW-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_offset ra, -8
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_offset s0, -16
+; RV64-WITHFP-DISABLESW-NEXT:    addi s0, sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV64-WITHFP-DISABLESW-NEXT:    slli a0, a0, 32
+; RV64-WITHFP-DISABLESW-NEXT:    srli a0, a0, 32
+; RV64-WITHFP-DISABLESW-NEXT:    addi a0, a0, 15
+; RV64-WITHFP-DISABLESW-NEXT:    andi a0, a0, -16
+; RV64-WITHFP-DISABLESW-NEXT:    sub a0, sp, a0
+; RV64-WITHFP-DISABLESW-NEXT:    mv sp, a0
+; RV64-WITHFP-DISABLESW-NEXT:    call callee_with_args
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, s0, -16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV64-WITHFP-DISABLESW-NEXT:    ret
 entry:
   %0 = alloca i8, i32 %size, align 16
   call void @callee_with_args(ptr nonnull %0)
@@ -249,6 +406,118 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-WITHFP-NEXT:    ret
+;
+; RV32-DISABLESW-LABEL: branch_and_tail_call:
+; RV32-DISABLESW:       # %bb.0:
+; RV32-DISABLESW-NEXT:    addi sp, sp, -16
+; RV32-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV32-DISABLESW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-DISABLESW-NEXT:    .cfi_offset ra, -4
+; RV32-DISABLESW-NEXT:    .cfi_remember_state
+; RV32-DISABLESW-NEXT:    andi a0, a0, 1
+; RV32-DISABLESW-NEXT:    beqz a0, .LBB2_2
+; RV32-DISABLESW-NEXT:  # %bb.1: # %blue_pill
+; RV32-DISABLESW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-DISABLESW-NEXT:    .cfi_restore ra
+; RV32-DISABLESW-NEXT:    addi sp, sp, 16
+; RV32-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV32-DISABLESW-NEXT:    tail callee1
+; RV32-DISABLESW-NEXT:  .LBB2_2: # %red_pill
+; RV32-DISABLESW-NEXT:    .cfi_restore_state
+; RV32-DISABLESW-NEXT:    call callee2
+; RV32-DISABLESW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-DISABLESW-NEXT:    .cfi_restore ra
+; RV32-DISABLESW-NEXT:    addi sp, sp, 16
+; RV32-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV32-DISABLESW-NEXT:    ret
+;
+; RV64-DISABLESW-LABEL: branch_and_tail_call:
+; RV64-DISABLESW:       # %bb.0:
+; RV64-DISABLESW-NEXT:    addi sp, sp, -16
+; RV64-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV64-DISABLESW-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-DISABLESW-NEXT:    .cfi_offset ra, -8
+; RV64-DISABLESW-NEXT:    .cfi_remember_state
+; RV64-DISABLESW-NEXT:    andi a0, a0, 1
+; RV64-DISABLESW-NEXT:    beqz a0, .LBB2_2
+; RV64-DISABLESW-NEXT:  # %bb.1: # %blue_pill
+; RV64-DISABLESW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-DISABLESW-NEXT:    .cfi_restore ra
+; RV64-DISABLESW-NEXT:    addi sp, sp, 16
+; RV64-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV64-DISABLESW-NEXT:    tail callee1
+; RV64-DISABLESW-NEXT:  .LBB2_2: # %red_pill
+; RV64-DISABLESW-NEXT:    .cfi_restore_state
+; RV64-DISABLESW-NEXT:    call callee2
+; RV64-DISABLESW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-DISABLESW-NEXT:    .cfi_restore ra
+; RV64-DISABLESW-NEXT:    addi sp, sp, 16
+; RV64-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV64-DISABLESW-NEXT:    ret
+;
+; RV32-WITHFP-DISABLESW-LABEL: branch_and_tail_call:
+; RV32-WITHFP-DISABLESW:       # %bb.0:
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, sp, -16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV32-WITHFP-DISABLESW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-DISABLESW-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_offset ra, -4
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_offset s0, -8
+; RV32-WITHFP-DISABLESW-NEXT:    addi s0, sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_remember_state
+; RV32-WITHFP-DISABLESW-NEXT:    andi a0, a0, 1
+; RV32-WITHFP-DISABLESW-NEXT:    beqz a0, .LBB2_2
+; RV32-WITHFP-DISABLESW-NEXT:  # %bb.1: # %blue_pill
+; RV32-WITHFP-DISABLESW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV32-WITHFP-DISABLESW-NEXT:    tail callee1
+; RV32-WITHFP-DISABLESW-NEXT:  .LBB2_2: # %red_pill
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore_state
+; RV32-WITHFP-DISABLESW-NEXT:    call callee2
+; RV32-WITHFP-DISABLESW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV32-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV32-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV32-WITHFP-DISABLESW-NEXT:    ret
+;
+; RV64-WITHFP-DISABLESW-LABEL: branch_and_tail_call:
+; RV64-WITHFP-DISABLESW:       # %bb.0:
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, sp, -16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 16
+; RV64-WITHFP-DISABLESW-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-DISABLESW-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_offset ra, -8
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_offset s0, -16
+; RV64-WITHFP-DISABLESW-NEXT:    addi s0, sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa s0, 0
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_remember_state
+; RV64-WITHFP-DISABLESW-NEXT:    andi a0, a0, 1
+; RV64-WITHFP-DISABLESW-NEXT:    beqz a0, .LBB2_2
+; RV64-WITHFP-DISABLESW-NEXT:  # %bb.1: # %blue_pill
+; RV64-WITHFP-DISABLESW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV64-WITHFP-DISABLESW-NEXT:    tail callee1
+; RV64-WITHFP-DISABLESW-NEXT:  .LBB2_2: # %red_pill
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore_state
+; RV64-WITHFP-DISABLESW-NEXT:    call callee2
+; RV64-WITHFP-DISABLESW-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore ra
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_restore s0
+; RV64-WITHFP-DISABLESW-NEXT:    addi sp, sp, 16
+; RV64-WITHFP-DISABLESW-NEXT:    .cfi_def_cfa_offset 0
+; RV64-WITHFP-DISABLESW-NEXT:    ret
   br i1 %a, label %blue_pill, label %red_pill
 blue_pill:
   tail call void @callee1()
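
A note on the new branch_and_tail_call checks above: the early tail-call
path must rewind its CFI state before "tail callee1", while the fall-through
red_pill block still needs the original frame description, so the epilogue
brackets the divergence with a .cfi_remember_state / .cfi_restore_state
pair. A minimal sketch of how such a pair is typically emitted in frame
lowering (illustrative only, not a verbatim excerpt of this patch; MF, MBB,
MBBI, DL and TII as in the surrounding code, and RestoreMBBI is a
hypothetical iterator at the start of the fall-through block):

  // Remember the unwind state before the early-exit epilogue rewinds it.
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::createRememberState(nullptr));
  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex)
      .setMIFlag(MachineInstr::FrameDestroy);

  // ... register restores, stack deallocation, and the tail call ...

  // Reinstate the remembered state for the path that did not exit early.
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestoreState(nullptr));
  BuildMI(MBB, RestoreMBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex)
      .setMIFlag(MachineInstr::FrameDestroy);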

>From 03bebe3e8cbf114e3c0cac0b41f4fc337763ab07 Mon Sep 17 00:00:00 2001
From: Daniil Avdeev <daniil.avdeev at syntacore.com>
Date: Thu, 3 Oct 2024 02:02:36 +0300
Subject: [PATCH 6/7] [RISCV][CFI] fix __riscv_restore

---
 llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 872485f37c972b..66a693277a11de 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -827,7 +827,6 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
 
   bool RestoreFP = RI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
                    !hasReservedCallFrame(MF);
-
   if (RVVStackSize) {
     // If restoreFP the stack pointer will be restored using the frame pointer
     // value.
@@ -892,6 +891,19 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
         .setMIFlag(MachineInstr::FrameDestroy);
   }
 
+  if (getLibCallID(MF, CSI) + 1) {
+    // The tail __riscv_restore_[0-12] instruction is considered a terminator,
+    // therefore it is unnecessary to place any CFI instructions after it.
+    // Just deallocate the stack if needed and return.
+    if (StackSize != 0)
+      deallocateStack(MF, MBB, MBBI, DL, StackSize,
+                      RVFI->getLibCallStackSize());
+
+    // Emit epilogue for shadow call stack.
+    emitSCSEpilogue(MF, MBB, MBBI, DL);
+    return;
+  }
+
   bool ApplyPop = RVFI->isPushable(MF) && MBBI != MBB.end() &&
                   MBBI->getOpcode() == RISCV::CM_POP;
   if (ApplyPop) {
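
For context on the guard added above: getLibCallID returns -1 when the
function does not use the RISC-V save/restore libcalls, so "ID + 1" is
nonzero exactly when a __riscv_restore_* epilogue exists. A tiny standalone
sketch of that sentinel idiom (hypothetical names, not LLVM code):

  #include <cstdio>

  // lookupLibCallID stands in for getLibCallID: -1 means "no libcall".
  static int lookupLibCallID(bool usesSaveRestore) {
    return usesSaveRestore ? 1 : -1;
  }

  int main() {
    const bool cases[] = {false, true};
    for (bool uses : cases) {
      int ID = lookupLibCallID(uses);
      if (ID + 1) // nonzero iff ID != -1
        std::printf("epilogue: tail __riscv_restore_%d\n", ID);
      else
        std::printf("no save/restore libcall epilogue\n");
    }
    return 0;
  }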

>From 7a136dc331a8a40d3896d81cc75eda5b038ff170 Mon Sep 17 00:00:00 2001
From: Daniil Avdeev <daniil.avdeev at syntacore.com>
Date: Thu, 3 Oct 2024 02:03:25 +0300
Subject: [PATCH 7/7] [RISCV][CFI] update __riscv_restore tests

---
 .../test/CodeGen/RISCV/calling-conv-ilp32e.ll | 126 +++---------------
 llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir   |   8 --
 llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir  |  26 ----
 3 files changed, 18 insertions(+), 142 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
index deaf32c54dacce..2c524ee61a5668 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
@@ -69,8 +69,6 @@ define i32 @callee_float_in_regs(i32 %a, float %b) {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    mv a0, a1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call __fixsfsi
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, s0, a0
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: callee_float_in_regs:
@@ -86,9 +84,6 @@ define i32 @callee_float_in_regs(i32 %a, float %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    mv a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call __fixsfsi
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, s1, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_2
   %b_fptosi = fptosi float %b to i32
   %1 = add i32 %a, %b_fptosi
@@ -140,7 +135,6 @@ define i32 @caller_float_in_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    lui a1, 262144
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_float_in_regs
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_float_in_regs:
@@ -154,8 +148,6 @@ define i32 @caller_float_in_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a1, 262144
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_float_in_regs
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_float_in_regs(i32 1, float 2.0)
   ret i32 %1
@@ -208,8 +200,6 @@ define i32 @callee_float_on_stack(i64 %a, i64 %b, i64 %c, i64 %d, float %e) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a0, 8(s0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a1, 0(s0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a1, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = trunc i64 %d to i32
   %2 = bitcast float %e to i32
@@ -290,9 +280,8 @@ define i32 @caller_float_on_stack() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a3, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a5, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_float_on_stack
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 12
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_float_on_stack:
@@ -316,10 +305,8 @@ define i32 @caller_float_on_stack() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a3, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a5, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_float_on_stack
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 12
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_float_on_stack(i64 1, i64 2, i64 3, i64 4, float 5.0)
   ret i32 %1
@@ -364,8 +351,6 @@ define float @callee_tiny_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a0, 260096
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret float 1.0
 }
@@ -409,7 +394,6 @@ define i32 @caller_tiny_scalar_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_tiny_scalar_ret
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_tiny_scalar_ret:
@@ -421,8 +405,6 @@ define i32 @caller_tiny_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_tiny_scalar_ret
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call float @callee_tiny_scalar_ret()
   %2 = bitcast float %1 to i32
@@ -492,8 +474,6 @@ define i32 @callee_double_in_regs(i32 %a, double %b) {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    mv a1, a2
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call __fixdfsi
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, s0, a0
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: callee_double_in_regs:
@@ -510,9 +490,6 @@ define i32 @callee_double_in_regs(i32 %a, double %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    mv a1, a2
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call __fixdfsi
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, s1, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_2
   %b_fptosi = fptosi double %b to i32
   %1 = add i32 %a, %b_fptosi
@@ -567,7 +544,6 @@ define i32 @caller_double_in_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    lui a2, 262144
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a1, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_double_in_regs
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_double_in_regs:
@@ -582,8 +558,6 @@ define i32 @caller_double_in_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a2, 262144
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_double_in_regs
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_double_in_regs(i32 1, double 2.0)
   ret i32 %1
@@ -672,8 +646,6 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a4, a5, a4
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a4
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = bitcast fp128 %c to i128
   %2 = trunc i128 %1 to i32
@@ -859,10 +831,8 @@ define void @caller_aligned_stack() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_aligned_stack
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -64
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 56
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_aligned_stack:
@@ -915,10 +885,8 @@ define void @caller_aligned_stack() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_aligned_stack
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -64
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 56
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_aligned_stack(i32 1, i32 11,
     fp128 0xLEB851EB851EB851F400091EB851EB851, i32 12, i32 13,
@@ -970,8 +938,6 @@ define double @callee_small_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lui a1, 261888
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret double 1.0
 }
@@ -1015,7 +981,6 @@ define i64 @caller_small_scalar_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_small_scalar_ret
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_small_scalar_ret:
@@ -1027,8 +992,6 @@ define i64 @caller_small_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_small_scalar_ret
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call double @callee_small_scalar_ret()
   %2 = bitcast double %1 to i64
@@ -1077,8 +1040,6 @@ define i32 @callee_i64_in_regs(i32 %a, i64 %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi s0, sp, 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %b_trunc = trunc i64 %b to i32
   %1 = add i32 %a, %b_trunc
@@ -1133,7 +1094,6 @@ define i32 @caller_i64_in_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a2, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_i64_in_regs
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_i64_in_regs:
@@ -1148,8 +1108,6 @@ define i32 @caller_i64_in_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a2, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_i64_in_regs
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_i64_in_regs(i32 1, i64 2)
   ret i32 %1
@@ -1260,8 +1218,6 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a7
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a6
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a1, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %a_ext = zext i8 %a to i32
   %b_ext = zext i16 %b to i32
@@ -1355,9 +1311,8 @@ define i32 @caller_many_scalars() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    sw a4, 0(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a4, 0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_many_scalars
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 16
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_many_scalars:
@@ -1383,10 +1338,8 @@ define i32 @caller_many_scalars() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a4, 0(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a4, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_many_scalars
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 16
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i32 5, i32 6, i64 7, i32 8)
   ret i32 %1
@@ -1493,8 +1446,6 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a1, a0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a0, a3
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    seqz a0, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %b_bitcast = bitcast fp128 %b to i128
   %1 = icmp eq i128 %a, %b_bitcast
@@ -1596,10 +1547,8 @@ define i32 @caller_large_scalars() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_scalars
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -48
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 48
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 40
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalars:
@@ -1627,10 +1576,8 @@ define i32 @caller_large_scalars() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_scalars
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -48
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 48
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 40
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_large_scalars(i128 1, fp128 0xL00000000000000007FFF000000000000)
   ret i32 %1
@@ -1747,8 +1694,6 @@ define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d,
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a1, a0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    or a0, a0, a3
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    seqz a0, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %j_bitcast = bitcast fp128 %j to i128
   %1 = icmp eq i128 %h, %j_bitcast
@@ -1886,10 +1831,8 @@ define i32 @caller_large_scalars_exhausted_regs() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_scalars_exhausted_regs
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -64
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 56
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalars_exhausted_regs:
@@ -1929,10 +1872,8 @@ define i32 @caller_large_scalars_exhausted_regs() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_scalars_exhausted_regs
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -64
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 64
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 56
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_large_scalars_exhausted_regs(
       i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 8, i32 9,
@@ -2012,10 +1953,8 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    lw a0, 0(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -24
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 16
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_mixed_scalar_libcalls:
@@ -2035,10 +1974,8 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a0, 0(sp)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -24
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 16
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = sitofp i64 %a to fp128
   %2 = bitcast fp128 %1 to i128
@@ -2094,8 +2031,6 @@ define i32 @callee_small_coerced_struct([2 x i32] %a.coerce) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    xor a0, a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    seqz a0, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = extractvalue [2 x i32] %a.coerce, 0
   %2 = extractvalue [2 x i32] %a.coerce, 1
@@ -2149,7 +2084,6 @@ define i32 @caller_small_coerced_struct() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_small_coerced_struct
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_small_coerced_struct:
@@ -2163,8 +2097,6 @@ define i32 @caller_small_coerced_struct() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 2
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_small_coerced_struct
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call i32 @callee_small_coerced_struct([2 x i32] [i32 1, i32 2])
   ret i32 %1
@@ -2221,8 +2153,6 @@ define i32 @callee_large_struct(ptr byval(%struct.large) align 4 %a) {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a1, 0(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    lw a0, 12(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a1, a0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = getelementptr inbounds %struct.large, ptr %a, i32 0, i32 0
   %2 = getelementptr inbounds %struct.large, ptr %a, i32 0, i32 3
@@ -2311,9 +2241,8 @@ define i32 @caller_large_struct() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    sw a3, 12(sp)
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    mv a0, sp
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_struct
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 32
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_struct:
@@ -2339,10 +2268,8 @@ define i32 @caller_large_struct() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a3, -28(s0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi a0, s0, -40
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_struct
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 32
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %ls = alloca %struct.large, align 4
   %1 = bitcast ptr %ls to ptr
@@ -2403,8 +2330,6 @@ define %struct.small @callee_small_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a0, 1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 0
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret %struct.small { i32 1, ptr null }
 }
@@ -2451,7 +2376,6 @@ define i32 @caller_small_struct_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_offset ra, -4
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_small_struct_ret
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, a0, a1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_0
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_small_struct_ret:
@@ -2464,8 +2388,6 @@ define i32 @caller_small_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa s0, 0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_small_struct_ret
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call %struct.small @callee_small_struct_ret()
   %2 = extractvalue %struct.small %1, 0
@@ -2532,8 +2454,6 @@ define fp128 @callee_large_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw zero, 8(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw zero, 4(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw zero, 0(a0)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   ret fp128 0xL00000000000000007FFF000000000000
 }
@@ -2599,10 +2519,8 @@ define void @caller_large_scalar_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    call callee_large_scalar_ret
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -32
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 32
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 24
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_scalar_ret:
@@ -2619,10 +2537,8 @@ define void @caller_large_scalar_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    call callee_large_scalar_ret
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -32
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 32
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 24
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = call fp128 @callee_large_scalar_ret()
   ret void
@@ -2697,8 +2613,6 @@ define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a1, 8(a0)
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    li a1, 4
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    sw a1, 12(a0)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %a = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 0
   store i32 1, ptr %a, align 4
@@ -2781,10 +2695,8 @@ define i32 @caller_large_struct_ret() {
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    add a0, a0, a1
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, s0, -24
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    addi sp, sp, 16
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-FPELIM-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
 ;
 ; ILP32E-WITHFP-SAVE-RESTORE-LABEL: caller_large_struct_ret:
@@ -2804,10 +2716,8 @@ define i32 @caller_large_struct_ret() {
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    add a0, a0, a1
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, s0, -24
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa sp, 24
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore ra
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_restore s0
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    addi sp, sp, 16
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 0
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    .cfi_def_cfa_offset 8
 ; ILP32E-WITHFP-SAVE-RESTORE-NEXT:    tail __riscv_restore_1
   %1 = alloca %struct.large
   call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
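
The .cfi_def_cfa_offset updates in this file follow from the same fix: after
the epilogue's own "addi sp, sp, N", the CFA offset is left at the size of
the register save area that the tail call to __riscv_restore_K will pop,
rather than dropping to 0 early. A quick standalone check of the arithmetic
(frame sizes inferred from the checks above; illustrative only):

  #include <cassert>

  // CFA offset remaining before "tail __riscv_restore_K": the frame size
  // minus the bytes the epilogue deallocates itself.
  static int remainingCFAOffset(int frameSize, int localDealloc) {
    return frameSize - localDealloc;
  }

  int main() {
    // FPELIM caller_float_on_stack: 16-byte frame, "addi sp, sp, 12",
    // leaving .cfi_def_cfa_offset 4 (the ra slot) for __riscv_restore_0.
    assert(remainingCFAOffset(16, 12) == 4);
    // WITHFP caller_float_on_stack: 20-byte frame, same 12-byte dealloc,
    // leaving .cfi_def_cfa_offset 8 (ra + s0) for __riscv_restore_1.
    assert(remainingCFAOffset(20, 12) == 8);
    return 0;
  }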
diff --git a/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir b/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
index b465f5a9567371..907c95aee0588e 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
+++ b/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
@@ -40,8 +40,6 @@ body:                   |
     ; CHECK-LIBCALL32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
     ; CHECK-LIBCALL32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL32-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2
     ;
     ; CHECK-ZCMP64-LABEL: name: popret_rvlist5
@@ -68,8 +66,6 @@ body:                   |
     ; CHECK-LIBCALL64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -16
     ; CHECK-LIBCALL64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL64-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2
     ;
     ; CHECK-NO-ZCMP32-LABEL: name: popret_rvlist5
@@ -144,8 +140,6 @@ body:                   |
     ; CHECK-LIBCALL32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x8 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x10 = ADDI $x0, 0
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL32-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2, implicit $x10
     ;
     ; CHECK-ZCMP64-LABEL: name: popretz_rvlist5
@@ -174,8 +168,6 @@ body:                   |
     ; CHECK-LIBCALL64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x8 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x10 = ADDI $x0, 0
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
     ; CHECK-LIBCALL64-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_1, implicit $x2, implicit $x10
     ;
     ; CHECK-NO-ZCMP32-LABEL: name: popretz_rvlist5
diff --git a/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir b/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir
index 5c516e9bb4bcc3..aea6bb7e18ffb8 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir
+++ b/llvm/test/CodeGen/RISCV/zcmp-cm-push-pop.mir
@@ -95,19 +95,6 @@ body:                   |
     ; CHECK-LIBCALL32-NEXT: $x25 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x26 = IMPLICIT_DEF
     ; CHECK-LIBCALL32-NEXT: $x27 = IMPLICIT_DEF
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
-    ; CHECK-LIBCALL32-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-LIBCALL32-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_12, implicit $x2
     ;
     ; CHECK-ZCMP64-LABEL: name: push_rvlist15
@@ -189,19 +176,6 @@ body:                   |
     ; CHECK-LIBCALL64-NEXT: $x25 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x26 = IMPLICIT_DEF
     ; CHECK-LIBCALL64-NEXT: $x27 = IMPLICIT_DEF
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x9
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x18
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x19
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x20
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x21
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x22
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x23
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x24
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x25
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x26
-    ; CHECK-LIBCALL64-NEXT: frame-destroy CFI_INSTRUCTION restore $x27
     ; CHECK-LIBCALL64-NEXT: frame-destroy PseudoTAIL target-flags(riscv-call) &__riscv_restore_12, implicit $x2
     ;
     ; CHECK-NO-ZCMP32-LABEL: name: push_rvlist15


