[llvm] e49180d - [RISCV] Xqccmp Code Generation (#128815)

via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 5 10:59:49 PST 2025


Author: Sam Elliott
Date: 2025-03-05T10:59:45-08:00
New Revision: e49180d84c4d8b25fa944e494f4f292479eec1f6

URL: https://github.com/llvm/llvm-project/commit/e49180d84c4d8b25fa944e494f4f292479eec1f6
DIFF: https://github.com/llvm/llvm-project/commit/e49180d84c4d8b25fa944e494f4f292479eec1f6.diff

LOG: [RISCV] Xqccmp Code Generation (#128815)

This adds support for Xqccmp to the following passes:
- Prolog Epilog Insertion - reusing much of the existing push/pop logic,
but extending it to cope with frame pointers and reorder the CFI
information correctly.
- Move Merger - extending it to support the `qc.` variants of the
double-move instructions.
- Push/Pop Optimizer - extending it to support the `qc.` variants of the
pop instructions.

The testing is based on existing Zcmp tests, but I have put them in
separate files as some of the Zcmp tests were getting quite long.

Added: 
    llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
    llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll
    llvm/test/CodeGen/RISCV/xqccmp-cm-popretz.mir
    llvm/test/CodeGen/RISCV/xqccmp-cm-push-pop.mir
    llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll
    llvm/test/CodeGen/RISCV/xqccmp-with-float.ll
    llvm/test/CodeGen/RISCV/xqccmp_mvas_mvsa.mir

Modified: 
    llvm/docs/ReleaseNotes.md
    llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
    llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp
    llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
    llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
    llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp
    llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir

Removed: 
    


################################################################################
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index 2fb2be1e77793..e7eaa60b88c63 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -109,8 +109,9 @@ Changes to the RISC-V Backend
 
 * Adds experimental assembler support for the Qualcomm uC 'Xqcilia` (Large Immediate Arithmetic)
   extension.
-* Adds experimental assembler support for the Qualcomm 'Xqccmp' extension, which
-  is a frame-pointer convention compatible version of Zcmp.
+* Adds experimental assembler and code generation support for the Qualcomm
+  'Xqccmp' extension, which is a frame-pointer convention compatible version of
+  Zcmp.
 * Added non-quadratic ``log-vrgather`` cost model for ``vrgather.vv`` instruction
 
 Changes to the WebAssembly Backend

diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 38dfd1e57f345..e14e6b5a77893 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -750,6 +750,54 @@ void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB,
   }
 }
 
+static bool isPush(unsigned Opcode) {
+  switch (Opcode) {
+  case RISCV::CM_PUSH:
+  case RISCV::QC_CM_PUSH:
+  case RISCV::QC_CM_PUSHFP:
+    return true;
+  default:
+    return false;
+  }
+}
+
+static bool isPop(unsigned Opcode) {
+  // There are other pops but these are the only ones introduced during this
+  // pass.
+  switch (Opcode) {
+  case RISCV::CM_POP:
+  case RISCV::QC_CM_POP:
+    return true;
+  default:
+    return false;
+  }
+}
+
+static unsigned getPushOpcode(RISCVMachineFunctionInfo::PushPopKind Kind,
+                              bool HasFP) {
+  switch (Kind) {
+  case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
+    return RISCV::CM_PUSH;
+  case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
+    return HasFP ? RISCV::QC_CM_PUSHFP : RISCV::QC_CM_PUSH;
+  default:
+    llvm_unreachable("Unhandled PushPopKind");
+  }
+}
+
+static unsigned getPopOpcode(RISCVMachineFunctionInfo::PushPopKind Kind) {
+  // There are other pops but they are introduced later by the Push/Pop
+  // Optimizer.
+  switch (Kind) {
+  case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
+    return RISCV::CM_POP;
+  case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
+    return RISCV::QC_CM_POP;
+  default:
+    llvm_unreachable("Unhandled PushPopKind");
+  }
+}
+
 void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
   MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -849,7 +897,7 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
   }
 
   if (RVFI->isPushable(MF) && FirstFrameSetup != MBB.end() &&
-      FirstFrameSetup->getOpcode() == RISCV::CM_PUSH) {
+      isPush(FirstFrameSetup->getOpcode())) {
     // Use available stack adjustment in push instruction to allocate additional
     // stack space. Align the stack size down to a multiple of 16. This is
     // needed for RVE.
@@ -900,9 +948,15 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
     // The frame pointer does need to be reserved from register allocation.
     assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");
 
-    RI->adjustReg(MBB, MBBI, DL, FPReg, SPReg,
-                  StackOffset::getFixed(RealStackSize - RVFI->getVarArgsSaveSize()),
-                  MachineInstr::FrameSetup, getStackAlign());
+    // Xqccmp with hasFP will update FP using `qc.cm.pushfp`, so we don't need
+    // to update it again, but we do need to emit the `.cfi_def_cfa` below.
+    if (RVFI->getPushPopKind(MF) !=
+        RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp) {
+      RI->adjustReg(
+          MBB, MBBI, DL, FPReg, SPReg,
+          StackOffset::getFixed(RealStackSize - RVFI->getVarArgsSaveSize()),
+          MachineInstr::FrameSetup, getStackAlign());
+    }
 
     // Emit ".cfi_def_cfa $fp, RVFI->getVarArgsSaveSize()"
     unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
@@ -1160,9 +1214,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
   // Recover callee-saved registers.
   emitCFIForCSI<CFIRestoreRegisterEmitter>(MBB, MBBI, getUnmanagedCSI(MF, CSI));
 
-  bool ApplyPop = RVFI->isPushable(MF) && MBBI != MBB.end() &&
-                  MBBI->getOpcode() == RISCV::CM_POP;
-  if (ApplyPop) {
+  if (RVFI->isPushable(MF) && MBBI != MBB.end() && isPop(MBBI->getOpcode())) {
     // Use available stack adjustment in pop instruction to deallocate stack
     // space. Align the stack size down to a multiple of 16. This is needed for
     // RVE.
@@ -1781,7 +1833,8 @@ bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
 
       if (FII != std::end(FixedCSRFIMap)) {
         int64_t Offset;
-        if (RVFI->isPushable(MF))
+        if (RVFI->getPushPopKind(MF) ==
+            RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp)
           Offset = -int64_t(RVFI->getRVPushRegs() - RegNum) * Size;
         else
           Offset = -int64_t(RegNum + 1) * Size;
@@ -1845,9 +1898,10 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
     unsigned PushedRegNum = RVFI->getRVPushRegs();
     if (PushedRegNum > 0) {
       // Use encoded number to represent registers to spill.
+      unsigned Opcode = getPushOpcode(RVFI->getPushPopKind(*MF), hasFP(*MF));
       unsigned RegEnc = RISCVZC::encodeRlistNumRegs(PushedRegNum);
       MachineInstrBuilder PushBuilder =
-          BuildMI(MBB, MI, DL, TII.get(RISCV::CM_PUSH))
+          BuildMI(MBB, MI, DL, TII.get(Opcode))
               .setMIFlag(MachineInstr::FrameSetup);
       PushBuilder.addImm(RegEnc);
       PushBuilder.addImm(0);
@@ -2000,9 +2054,10 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
   if (RVFI->isPushable(*MF)) {
     unsigned PushedRegNum = RVFI->getRVPushRegs();
     if (PushedRegNum > 0) {
+      unsigned Opcode = getPopOpcode(RVFI->getPushPopKind(*MF));
       unsigned RegEnc = RISCVZC::encodeRlistNumRegs(PushedRegNum);
       MachineInstrBuilder PopBuilder =
-          BuildMI(MBB, MI, DL, TII.get(RISCV::CM_POP))
+          BuildMI(MBB, MI, DL, TII.get(Opcode))
               .setMIFlag(MachineInstr::FrameDestroy);
       // Use encoded number to represent registers to restore.
       PopBuilder.addImm(RegEnc);

diff --git a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp
index a0d7931763818..a4b89a42f3574 100644
--- a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.cpp
@@ -60,6 +60,27 @@ void yaml::RISCVMachineFunctionInfo::mappingImpl(yaml::IO &YamlIO) {
   MappingTraits<RISCVMachineFunctionInfo>::mapping(YamlIO, *this);
 }
 
+RISCVMachineFunctionInfo::PushPopKind
+RISCVMachineFunctionInfo::getPushPopKind(const MachineFunction &MF) const {
+  // We cannot use fixed locations for the callee saved spill slots if the
+  // function uses a varargs save area.
+  // TODO: Use a separate placement for vararg registers to enable Zcmp.
+  if (VarArgsSaveSize != 0)
+    return PushPopKind::None;
+
+  // Zcmp is not compatible with the frame pointer convention.
+  if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZcmp() &&
+      !MF.getTarget().Options.DisableFramePointerElim(MF))
+    return PushPopKind::StdExtZcmp;
+
+  // Xqccmp is Zcmp but has a push order compatible with the frame-pointer
+  // convention.
+  if (MF.getSubtarget<RISCVSubtarget>().hasVendorXqccmp())
+    return PushPopKind::VendorXqccmp;
+
+  return PushPopKind::None;
+}
+
 void RISCVMachineFunctionInfo::initializeBaseYamlFields(
     const yaml::RISCVMachineFunctionInfo &YamlMFI) {
   VarArgsFrameIndex = YamlMFI.VarArgsFrameIndex;

diff --git a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
index 3f88439086e56..94e9ad2a7e66b 100644
--- a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
@@ -136,13 +136,12 @@ class RISCVMachineFunctionInfo : public MachineFunctionInfo {
   unsigned getCalleeSavedStackSize() const { return CalleeSavedStackSize; }
   void setCalleeSavedStackSize(unsigned Size) { CalleeSavedStackSize = Size; }
 
+  enum class PushPopKind { None = 0, StdExtZcmp, VendorXqccmp };
+
+  PushPopKind getPushPopKind(const MachineFunction &MF) const;
+
   bool isPushable(const MachineFunction &MF) const {
-    // We cannot use fixed locations for the callee saved spill slots if the
-    // function uses a varargs save area.
-    // TODO: Use a separate placement for vararg registers to enable Zcmp.
-    return MF.getSubtarget<RISCVSubtarget>().hasStdExtZcmp() &&
-           !MF.getTarget().Options.DisableFramePointerElim(MF) &&
-           VarArgsSaveSize == 0;
+    return getPushPopKind(MF) != PushPopKind::None;
   }
 
   unsigned getRVPushRegs() const { return RVPushRegs; }

diff --git a/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp b/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
index 3c5462057b280..7a2541a652b58 100644
--- a/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMoveMerger.cpp
@@ -9,10 +9,12 @@
 // This file contains a pass that performs move related peephole optimizations
 // as Zcmp has specified. This pass should be run after register allocation.
 //
+// This pass also supports Xqccmp, which has identical instructions.
+//
 //===----------------------------------------------------------------------===//
 
 #include "RISCVInstrInfo.h"
-#include "RISCVMachineFunctionInfo.h"
+#include "RISCVSubtarget.h"
 
 using namespace llvm;
 
@@ -43,7 +45,7 @@ struct RISCVMoveMerge : public MachineFunctionPass {
   MachineBasicBlock::iterator
   findMatchingInst(MachineBasicBlock::iterator &MBBI, unsigned InstOpcode,
                    const DestSourcePair &RegPair);
-  bool mergeMoveSARegPair(MachineBasicBlock &MBB);
+  bool mergeMoveSARegPair(const RISCVSubtarget &STI, MachineBasicBlock &MBB);
   bool runOnMachineFunction(MachineFunction &Fn) override;
 
   StringRef getPassName() const override { return RISCV_MOVE_MERGE_NAME; }
@@ -56,6 +58,46 @@ char RISCVMoveMerge::ID = 0;
 INITIALIZE_PASS(RISCVMoveMerge, "riscv-move-merge", RISCV_MOVE_MERGE_NAME,
                 false, false)
 
+static bool isMoveFromAToS(unsigned Opcode) {
+  switch (Opcode) {
+  case RISCV::CM_MVA01S:
+  case RISCV::QC_CM_MVA01S:
+    return true;
+  default:
+    return false;
+  }
+}
+
+static unsigned getMoveFromAToSOpcode(const RISCVSubtarget &STI) {
+  if (STI.hasStdExtZcmp())
+    return RISCV::CM_MVA01S;
+
+  if (STI.hasVendorXqccmp())
+    return RISCV::QC_CM_MVA01S;
+
+  llvm_unreachable("Unhandled subtarget with paired A to S move.");
+}
+
+static bool isMoveFromSToA(unsigned Opcode) {
+  switch (Opcode) {
+  case RISCV::CM_MVSA01:
+  case RISCV::QC_CM_MVSA01:
+    return true;
+  default:
+    return false;
+  }
+}
+
+static unsigned getMoveFromSToAOpcode(const RISCVSubtarget &STI) {
+  if (STI.hasStdExtZcmp())
+    return RISCV::CM_MVSA01;
+
+  if (STI.hasVendorXqccmp())
+    return RISCV::QC_CM_MVSA01;
+
+  llvm_unreachable("Unhandled subtarget with paired S to A move");
+}
+
 // Check if registers meet CM.MVA01S constraints.
 bool RISCVMoveMerge::isCandidateToMergeMVA01S(const DestSourcePair &RegPair) {
   Register Destination = RegPair.Destination->getReg();
@@ -87,7 +129,7 @@ RISCVMoveMerge::mergePairedInsns(MachineBasicBlock::iterator I,
   MachineBasicBlock::iterator NextI = next_nodbg(I, E);
   DestSourcePair FirstPair = TII->isCopyInstrImpl(*I).value();
   DestSourcePair PairedRegs = TII->isCopyInstrImpl(*Paired).value();
-  Register ARegInFirstPair = Opcode == RISCV::CM_MVA01S
+  Register ARegInFirstPair = isMoveFromAToS(Opcode)
                                  ? FirstPair.Destination->getReg()
                                  : FirstPair.Source->getReg();
 
@@ -104,7 +146,7 @@ RISCVMoveMerge::mergePairedInsns(MachineBasicBlock::iterator I,
   //   mv a0, s2
   //   mv a1, s1    =>  cm.mva01s s2,s1
   bool StartWithX10 = ARegInFirstPair == RISCV::X10;
-  if (Opcode == RISCV::CM_MVA01S) {
+  if (isMoveFromAToS(Opcode)) {
     Sreg1 = StartWithX10 ? FirstPair.Source : PairedRegs.Source;
     Sreg2 = StartWithX10 ? PairedRegs.Source : FirstPair.Source;
   } else {
@@ -139,8 +181,7 @@ RISCVMoveMerge::findMatchingInst(MachineBasicBlock::iterator &MBBI,
       Register SourceReg = SecondPair->Source->getReg();
       Register DestReg = SecondPair->Destination->getReg();
 
-      if (InstOpcode == RISCV::CM_MVA01S &&
-          isCandidateToMergeMVA01S(*SecondPair)) {
+      if (isMoveFromAToS(InstOpcode) && isCandidateToMergeMVA01S(*SecondPair)) {
        // If register pair is valid and destination registers are different.
         if ((RegPair.Destination->getReg() == DestReg))
           return E;
@@ -154,7 +195,7 @@ RISCVMoveMerge::findMatchingInst(MachineBasicBlock::iterator &MBBI,
           return E;
 
         return I;
-      } else if (InstOpcode == RISCV::CM_MVSA01 &&
+      } else if (isMoveFromSToA(InstOpcode) &&
                  isCandidateToMergeMVSA01(*SecondPair)) {
         if ((RegPair.Source->getReg() == SourceReg) ||
             (RegPair.Destination->getReg() == DestReg))
@@ -176,7 +217,8 @@ RISCVMoveMerge::findMatchingInst(MachineBasicBlock::iterator &MBBI,
 
 // Finds instructions, which could be represented as C.MV instructions and
 // merged into CM.MVA01S or CM.MVSA01.
-bool RISCVMoveMerge::mergeMoveSARegPair(MachineBasicBlock &MBB) {
+bool RISCVMoveMerge::mergeMoveSARegPair(const RISCVSubtarget &STI,
+                                        MachineBasicBlock &MBB) {
   bool Modified = false;
 
   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
@@ -188,9 +230,9 @@ bool RISCVMoveMerge::mergeMoveSARegPair(MachineBasicBlock &MBB) {
       unsigned Opcode = 0;
 
       if (isCandidateToMergeMVA01S(*RegPair))
-        Opcode = RISCV::CM_MVA01S;
+        Opcode = getMoveFromAToSOpcode(STI);
       else if (isCandidateToMergeMVSA01(*RegPair))
-        Opcode = RISCV::CM_MVSA01;
+        Opcode = getMoveFromSToAOpcode(STI);
       else {
         ++MBBI;
         continue;
@@ -215,7 +257,7 @@ bool RISCVMoveMerge::runOnMachineFunction(MachineFunction &Fn) {
     return false;
 
   const RISCVSubtarget *Subtarget = &Fn.getSubtarget<RISCVSubtarget>();
-  if (!Subtarget->hasStdExtZcmp())
+  if (!(Subtarget->hasStdExtZcmp() || Subtarget->hasVendorXqccmp()))
     return false;
 
   TII = Subtarget->getInstrInfo();
@@ -227,7 +269,7 @@ bool RISCVMoveMerge::runOnMachineFunction(MachineFunction &Fn) {
   UsedRegUnits.init(*TRI);
   bool Modified = false;
   for (auto &MBB : Fn)
-    Modified |= mergeMoveSARegPair(MBB);
+    Modified |= mergeMoveSARegPair(*Subtarget, MBB);
   return Modified;
 }
 

diff --git a/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp
index 098e5bb5328bb..0ead9a4009fab 100644
--- a/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVPushPopOptimizer.cpp
@@ -13,6 +13,7 @@
 
 #include "RISCVInstrInfo.h"
 #include "RISCVMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
 
 using namespace llvm;
 
@@ -45,11 +46,34 @@ char RISCVPushPopOpt::ID = 0;
 INITIALIZE_PASS(RISCVPushPopOpt, "riscv-push-pop-opt", RISCV_PUSH_POP_OPT_NAME,
                 false, false)
 
+static bool isPop(unsigned Opcode) {
+  switch (Opcode) {
+  case RISCV::CM_POP:
+  case RISCV::QC_CM_POP:
+    return true;
+  default:
+    return false;
+  }
+}
+
+static unsigned getPopRetOpcode(unsigned PopOpcode, bool IsReturnZero) {
+  assert(isPop(PopOpcode) && "Unexpected Pop Opcode");
+
+  switch (PopOpcode) {
+  case RISCV::CM_POP:
+    return IsReturnZero ? RISCV::CM_POPRETZ : RISCV::CM_POPRET;
+  case RISCV::QC_CM_POP:
+    return IsReturnZero ? RISCV::QC_CM_POPRETZ : RISCV::QC_CM_POPRET;
+  default:
+    llvm_unreachable("Unhandled Pop Opcode");
+  }
+}
+
 // Check if POP instruction was inserted into the MBB and return iterator to it.
 static MachineBasicBlock::iterator containsPop(MachineBasicBlock &MBB) {
   for (MachineBasicBlock::iterator MBBI = MBB.begin(); MBBI != MBB.end();
        MBBI = next_nodbg(MBBI, MBB.end()))
-    if (MBBI->getOpcode() == RISCV::CM_POP)
+    if (MBBI->getFlag(MachineInstr::FrameDestroy) && isPop(MBBI->getOpcode()))
       return MBBI;
 
   return MBB.end();
@@ -61,11 +85,12 @@ bool RISCVPushPopOpt::usePopRet(MachineBasicBlock::iterator &MBBI,
   // Since Pseudo instruction lowering happen later in the pipeline,
   // this will detect all ret instruction.
   DebugLoc DL = NextI->getDebugLoc();
-  unsigned Opc = IsReturnZero ? RISCV::CM_POPRETZ : RISCV::CM_POPRET;
+  unsigned Opc = getPopRetOpcode(MBBI->getOpcode(), IsReturnZero);
   MachineInstrBuilder PopRetBuilder =
       BuildMI(*NextI->getParent(), NextI, DL, TII->get(Opc))
           .add(MBBI->getOperand(0))
-          .add(MBBI->getOperand(1));
+          .add(MBBI->getOperand(1))
+          .setMIFlag(MachineInstr::FrameDestroy);
 
   // Copy over the variable implicit uses and defs from the CM_POP. They depend
   // on what register list has been picked during frame lowering.
@@ -120,12 +145,7 @@ bool RISCVPushPopOpt::runOnMachineFunction(MachineFunction &Fn) {
 
   // If Zcmp extension is not supported, abort.
   const RISCVSubtarget *Subtarget = &Fn.getSubtarget<RISCVSubtarget>();
-  if (!Subtarget->hasStdExtZcmp())
-    return false;
-
-  // If frame pointer elimination has been disabled, abort to avoid breaking the
-  // ABI.
-  if (Fn.getTarget().Options.DisableFramePointerElim(Fn))
+  if (!Subtarget->hasStdExtZcmp() && !Subtarget->hasVendorXqccmp())
     return false;
 
   TII = Subtarget->getInstrInfo();

diff --git a/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
new file mode 100644
index 0000000000000..5a5a1ccd2e63a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp,+e -target-abi ilp32e -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+
+define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
+; RV32-LABEL: func:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    qc.cm.push {ra, s0-s1}, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    .cfi_offset s1, -12
+; RV32-NEXT:    addi sp, sp, -4
+; RV32-NEXT:    .cfi_def_cfa_offset 20
+; RV32-NEXT:    sw a4, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a2, 0(sp) # 4-byte Folded Spill
+; RV32-NEXT:    mv a2, a1
+; RV32-NEXT:    mv s1, a0
+; RV32-NEXT:    li a0, 1
+; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:  .LBB0_1: # %while.body
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    mv s0, a0
+; RV32-NEXT:    li a0, 0
+; RV32-NEXT:    bnez a3, .LBB0_1
+; RV32-NEXT:  # %bb.2: # %while.end
+; RV32-NEXT:    lui a0, 4112
+; RV32-NEXT:    addi a1, a0, 257
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    call __mulsi3
+; RV32-NEXT:    sw a0, 0(zero)
+; RV32-NEXT:    andi s0, s0, 1
+; RV32-NEXT:    lw a0, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT:    add s0, s0, a0
+; RV32-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sb a0, 0(s0)
+; RV32-NEXT:    mv a0, s1
+; RV32-NEXT:    addi sp, sp, 4
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+entry:
+  br label %while.body
+
+while.body:                                       ; preds = %while.body, %entry
+  %n.addr.042 = phi i32 [ 1, %entry ], [ 0, %while.body ]
+  br i1 %0, label %while.body, label %while.end
+
+while.end:                                        ; preds = %while.body
+  %or5 = mul i32 %_c, 16843009
+  store i32 %or5, ptr null, align 4
+  %1 = and i32 %n.addr.042, 1
+  %scevgep = getelementptr i8, ptr %incdec.ptr, i32 %1
+  store i8 %conv14, ptr %scevgep, align 1
+  ret ptr %s
+}

diff --git a/llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll
new file mode 100644
index 0000000000000..957469a3dabe4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll
@@ -0,0 +1,1179 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32IXQCCMP
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -verify-machineinstrs \
+; RUN:  -frame-pointer=all < %s | FileCheck %s -check-prefixes=RV32IXQCCMP-WITH-FP
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64IXQCCMP
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -verify-machineinstrs \
+; RUN:  -frame-pointer=all < %s | FileCheck %s -check-prefixes=RV64IXQCCMP-WITH-FP
+
+ at var = global [32 x i32] zeroinitializer
+
+; This function tests that RISCVRegisterInfo::getCalleeSavedRegs returns
+; something appropriate.
+
+define void @callee() {
+; RV32IXQCCMP-LABEL: callee:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -80
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 80
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-NEXT:    lui t0, %hi(var)
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var+4)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var+8)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var+12)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    addi a5, t0, %lo(var)
+; RV32IXQCCMP-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-NEXT:    lw s1, 80(a5)
+; RV32IXQCCMP-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-NEXT:    lw t1, 92(a5)
+; RV32IXQCCMP-NEXT:    lw a7, 112(a5)
+; RV32IXQCCMP-NEXT:    lw s0, 116(a5)
+; RV32IXQCCMP-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-NEXT:    lw a6, 96(a5)
+; RV32IXQCCMP-NEXT:    lw a4, 100(a5)
+; RV32IXQCCMP-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-NEXT:    sw s0, 116(a5)
+; RV32IXQCCMP-NEXT:    sw a7, 112(a5)
+; RV32IXQCCMP-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-NEXT:    sw a4, 100(a5)
+; RV32IXQCCMP-NEXT:    sw a6, 96(a5)
+; RV32IXQCCMP-NEXT:    sw t1, 92(a5)
+; RV32IXQCCMP-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-NEXT:    sw s1, 80(a5)
+; RV32IXQCCMP-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var+12)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var+8)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var+4)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var)(t0)
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 80
+;
+; RV32IXQCCMP-WITH-FP-LABEL: callee:
+; RV32IXQCCMP-WITH-FP:       # %bb.0:
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -80
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 80
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-WITH-FP-NEXT:    lui t1, %hi(var)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+4)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+8)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+12)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    addi a5, t1, %lo(var)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 24(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw t4, 80(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s1, 92(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw t0, 112(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a4, 116(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a7, 96(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a6, 100(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a4, 116(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw t0, 112(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a6, 100(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a7, 96(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s1, 92(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw t4, 80(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 24(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+12)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+8)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+4)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var)(t1)
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 80
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 80
+;
+; RV64IXQCCMP-LABEL: callee:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -160
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-NEXT:    lui t0, %hi(var)
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var+4)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var+8)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var+12)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    addi a5, t0, %lo(var)
+; RV64IXQCCMP-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-NEXT:    lw s1, 80(a5)
+; RV64IXQCCMP-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-NEXT:    lw t1, 92(a5)
+; RV64IXQCCMP-NEXT:    lw a7, 112(a5)
+; RV64IXQCCMP-NEXT:    lw s0, 116(a5)
+; RV64IXQCCMP-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-NEXT:    lw a6, 96(a5)
+; RV64IXQCCMP-NEXT:    lw a4, 100(a5)
+; RV64IXQCCMP-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-NEXT:    sw s0, 116(a5)
+; RV64IXQCCMP-NEXT:    sw a7, 112(a5)
+; RV64IXQCCMP-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-NEXT:    sw a4, 100(a5)
+; RV64IXQCCMP-NEXT:    sw a6, 96(a5)
+; RV64IXQCCMP-NEXT:    sw t1, 92(a5)
+; RV64IXQCCMP-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-NEXT:    sw s1, 80(a5)
+; RV64IXQCCMP-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var+12)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var+8)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var+4)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var)(t0)
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 160
+;
+; RV64IXQCCMP-WITH-FP-LABEL: callee:
+; RV64IXQCCMP-WITH-FP:       # %bb.0:
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -160
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-WITH-FP-NEXT:    lui t1, %hi(var)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -112(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+4)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -120(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+8)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -128(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+12)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    addi a5, t1, %lo(var)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -144(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -152(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 24(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -160(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw t4, 80(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s1, 92(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw t0, 112(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a4, 116(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a7, 96(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a6, 100(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a4, 116(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw t0, 112(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a6, 100(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a7, 96(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s1, 92(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw t4, 80(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 24(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+12)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+8)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+4)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 160
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 160
+  %val = load [32 x i32], ptr @var
+  store volatile [32 x i32] %val, ptr @var
+  ret void
+}
+
+; This function tests that RISCVRegisterInfo::getCallPreservedMask returns
+; something appropriate.
+
+define void @caller() {
+; RV32IXQCCMP-LABEL: caller:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -112
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-NEXT:    addi sp, sp, -32
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 144
+; RV32IXQCCMP-NEXT:    lui s0, %hi(var)
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var)(s0)
+; RV32IXQCCMP-NEXT:    sw a0, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var+4)(s0)
+; RV32IXQCCMP-NEXT:    sw a0, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var+8)(s0)
+; RV32IXQCCMP-NEXT:    sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var+12)(s0)
+; RV32IXQCCMP-NEXT:    sw a0, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    addi s1, s0, %lo(var)
+; RV32IXQCCMP-NEXT:    lw a0, 16(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 20(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 24(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 28(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 32(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 36(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 40(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 44(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 48(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 52(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 56(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 60(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 64(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 68(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 72(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 76(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 80(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 84(s1)
+; RV32IXQCCMP-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw s4, 88(s1)
+; RV32IXQCCMP-NEXT:    lw s5, 92(s1)
+; RV32IXQCCMP-NEXT:    lw s6, 96(s1)
+; RV32IXQCCMP-NEXT:    lw s7, 100(s1)
+; RV32IXQCCMP-NEXT:    lw s8, 104(s1)
+; RV32IXQCCMP-NEXT:    lw s9, 108(s1)
+; RV32IXQCCMP-NEXT:    lw s10, 112(s1)
+; RV32IXQCCMP-NEXT:    lw s11, 116(s1)
+; RV32IXQCCMP-NEXT:    lw s2, 120(s1)
+; RV32IXQCCMP-NEXT:    lw s3, 124(s1)
+; RV32IXQCCMP-NEXT:    call callee
+; RV32IXQCCMP-NEXT:    sw s3, 124(s1)
+; RV32IXQCCMP-NEXT:    sw s2, 120(s1)
+; RV32IXQCCMP-NEXT:    sw s11, 116(s1)
+; RV32IXQCCMP-NEXT:    sw s10, 112(s1)
+; RV32IXQCCMP-NEXT:    sw s9, 108(s1)
+; RV32IXQCCMP-NEXT:    sw s8, 104(s1)
+; RV32IXQCCMP-NEXT:    sw s7, 100(s1)
+; RV32IXQCCMP-NEXT:    sw s6, 96(s1)
+; RV32IXQCCMP-NEXT:    sw s5, 92(s1)
+; RV32IXQCCMP-NEXT:    sw s4, 88(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 84(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 80(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 76(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 72(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 68(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 64(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 60(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 56(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 52(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 48(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 44(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 40(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 36(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 32(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 28(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 24(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 20(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 72(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 16(s1)
+; RV32IXQCCMP-NEXT:    lw a0, 76(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var+12)(s0)
+; RV32IXQCCMP-NEXT:    lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var+8)(s0)
+; RV32IXQCCMP-NEXT:    lw a0, 84(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var+4)(s0)
+; RV32IXQCCMP-NEXT:    lw a0, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var)(s0)
+; RV32IXQCCMP-NEXT:    addi sp, sp, 32
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 112
+;
+; RV32IXQCCMP-WITH-FP-LABEL: caller:
+; RV32IXQCCMP-WITH-FP:       # %bb.0:
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -112
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-WITH-FP-NEXT:    addi sp, sp, -32
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 144
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-WITH-FP-NEXT:    lui s6, %hi(var)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+4)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+8)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+12)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    addi s1, s6, %lo(var)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 16(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 20(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 24(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 28(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -84(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 32(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -88(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 36(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -92(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 40(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -96(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 44(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -100(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 48(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -104(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 52(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -108(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 56(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -112(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 60(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -116(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 64(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -120(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 68(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -124(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 72(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -128(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 76(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -132(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 80(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -136(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 84(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -140(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, 88(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, -144(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s8, 92(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s9, 96(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s10, 100(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s11, 104(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s2, 108(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s3, 112(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s4, 116(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s5, 120(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s7, 124(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    call callee
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s7, 124(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s5, 120(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s4, 116(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s3, 112(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s2, 108(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s11, 104(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s10, 100(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s9, 96(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s8, 92(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -144(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 88(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -140(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 84(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -136(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 80(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -132(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 76(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -128(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 72(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -124(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 68(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -120(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 64(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -116(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 60(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -112(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 56(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -108(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 52(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -104(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 48(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -100(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 44(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -96(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 40(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -92(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 36(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -88(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 32(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -84(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 28(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 24(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 20(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 16(s1)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+12)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+8)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+4)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var)(s6)
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 144
+; RV32IXQCCMP-WITH-FP-NEXT:    addi sp, sp, 32
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 112
+;
+; RV64IXQCCMP-LABEL: caller:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -160
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-NEXT:    addi sp, sp, -128
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 288
+; RV64IXQCCMP-NEXT:    lui s0, %hi(var)
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var)(s0)
+; RV64IXQCCMP-NEXT:    sd a0, 176(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var+4)(s0)
+; RV64IXQCCMP-NEXT:    sd a0, 168(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var+8)(s0)
+; RV64IXQCCMP-NEXT:    sd a0, 160(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var+12)(s0)
+; RV64IXQCCMP-NEXT:    sd a0, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    addi s1, s0, %lo(var)
+; RV64IXQCCMP-NEXT:    lw a0, 16(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 20(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 136(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 24(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 128(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 28(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 32(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 36(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 40(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 44(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 48(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 52(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 56(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 60(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 64(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 68(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 72(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 76(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 80(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 84(s1)
+; RV64IXQCCMP-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw s4, 88(s1)
+; RV64IXQCCMP-NEXT:    lw s5, 92(s1)
+; RV64IXQCCMP-NEXT:    lw s6, 96(s1)
+; RV64IXQCCMP-NEXT:    lw s7, 100(s1)
+; RV64IXQCCMP-NEXT:    lw s8, 104(s1)
+; RV64IXQCCMP-NEXT:    lw s9, 108(s1)
+; RV64IXQCCMP-NEXT:    lw s10, 112(s1)
+; RV64IXQCCMP-NEXT:    lw s11, 116(s1)
+; RV64IXQCCMP-NEXT:    lw s2, 120(s1)
+; RV64IXQCCMP-NEXT:    lw s3, 124(s1)
+; RV64IXQCCMP-NEXT:    call callee
+; RV64IXQCCMP-NEXT:    sw s3, 124(s1)
+; RV64IXQCCMP-NEXT:    sw s2, 120(s1)
+; RV64IXQCCMP-NEXT:    sw s11, 116(s1)
+; RV64IXQCCMP-NEXT:    sw s10, 112(s1)
+; RV64IXQCCMP-NEXT:    sw s9, 108(s1)
+; RV64IXQCCMP-NEXT:    sw s8, 104(s1)
+; RV64IXQCCMP-NEXT:    sw s7, 100(s1)
+; RV64IXQCCMP-NEXT:    sw s6, 96(s1)
+; RV64IXQCCMP-NEXT:    sw s5, 92(s1)
+; RV64IXQCCMP-NEXT:    sw s4, 88(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 84(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 80(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 76(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 72(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 68(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 64(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 60(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 56(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 52(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 48(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 44(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 40(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 36(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 32(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 28(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 24(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 20(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 16(s1)
+; RV64IXQCCMP-NEXT:    ld a0, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var+12)(s0)
+; RV64IXQCCMP-NEXT:    ld a0, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var+8)(s0)
+; RV64IXQCCMP-NEXT:    ld a0, 168(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var+4)(s0)
+; RV64IXQCCMP-NEXT:    ld a0, 176(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var)(s0)
+; RV64IXQCCMP-NEXT:    addi sp, sp, 128
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 160
+;
+; RV64IXQCCMP-WITH-FP-LABEL: caller:
+; RV64IXQCCMP-WITH-FP:       # %bb.0:
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -160
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-WITH-FP-NEXT:    addi sp, sp, -128
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 288
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-WITH-FP-NEXT:    lui s6, %hi(var)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -112(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+4)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -120(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+8)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -128(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, %lo(var+12)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    addi s1, s6, %lo(var)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 16(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -144(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 20(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -152(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 24(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -160(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 28(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -168(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 32(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -176(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 36(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -184(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 40(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -192(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 44(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -200(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 48(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -208(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 52(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -216(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 56(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -224(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 60(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -232(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 64(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -240(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 68(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -248(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 72(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -256(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 76(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -264(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 80(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -272(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 84(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -280(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw a0, 88(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, -288(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s8, 92(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s9, 96(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s10, 100(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s11, 104(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s2, 108(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s3, 112(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s4, 116(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s5, 120(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    lw s7, 124(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    call callee
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s7, 124(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s5, 120(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s4, 116(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s3, 112(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s2, 108(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s11, 104(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s10, 100(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s9, 96(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    sw s8, 92(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -288(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 88(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -280(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 84(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -272(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 80(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -264(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 76(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -256(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 72(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -248(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 68(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -240(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 64(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -232(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 60(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -224(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 56(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -216(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 52(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -208(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 48(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -200(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 44(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -192(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 40(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -184(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 36(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -176(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 32(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -168(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 28(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 24(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 20(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, 16(s1)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+12)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+8)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var+4)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    sw a0, %lo(var)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 288
+; RV64IXQCCMP-WITH-FP-NEXT:    addi sp, sp, 128
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 160
+  %val = load [32 x i32], ptr @var
+  call void @callee()
+  store volatile [32 x i32] %val, ptr @var
+  ret void
+}
+
+; This function tests if the stack size is correctly calculated when
+; callee-saved registers are not a sequential list from $ra
+define void @foo() {
+; RV32IXQCCMP-LABEL: foo:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s4}, -32
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 32
+; RV32IXQCCMP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-NEXT:    #APP
+; RV32IXQCCMP-NEXT:    li s4, 0
+; RV32IXQCCMP-NEXT:    #NO_APP
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s4}, 32
+;
+; RV32IXQCCMP-WITH-FP-LABEL: foo:
+; RV32IXQCCMP-WITH-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s4}, -32
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 32
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-WITH-FP-NEXT:    #APP
+; RV32IXQCCMP-WITH-FP-NEXT:    li s4, 0
+; RV32IXQCCMP-WITH-FP-NEXT:    #NO_APP
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 32
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s4}, 32
+;
+; RV64IXQCCMP-LABEL: foo:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s4}, -48
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 48
+; RV64IXQCCMP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-NEXT:    #APP
+; RV64IXQCCMP-NEXT:    li s4, 0
+; RV64IXQCCMP-NEXT:    #NO_APP
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s4}, 48
+;
+; RV64IXQCCMP-WITH-FP-LABEL: foo:
+; RV64IXQCCMP-WITH-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s4}, -48
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 48
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-WITH-FP-NEXT:    #APP
+; RV64IXQCCMP-WITH-FP-NEXT:    li s4, 0
+; RV64IXQCCMP-WITH-FP-NEXT:    #NO_APP
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 48
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s4}, 48
+entry:
+  tail call void asm sideeffect "li s4, 0", "~{s4}"()
+  ret void
+}
+
+; Check .cfi_offset of s11 is correct for Xqccmp.
+define void @bar() {
+; RV32IXQCCMP-LABEL: bar:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -64
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-NEXT:    #APP
+; RV32IXQCCMP-NEXT:    li s11, 0
+; RV32IXQCCMP-NEXT:    #NO_APP
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 64
+;
+; RV32IXQCCMP-WITH-FP-LABEL: bar:
+; RV32IXQCCMP-WITH-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -64
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-WITH-FP-NEXT:    #APP
+; RV32IXQCCMP-WITH-FP-NEXT:    li s11, 0
+; RV32IXQCCMP-WITH-FP-NEXT:    #NO_APP
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 64
+; RV32IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 64
+;
+; RV64IXQCCMP-LABEL: bar:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -112
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 112
+; RV64IXQCCMP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-NEXT:    #APP
+; RV64IXQCCMP-NEXT:    li s11, 0
+; RV64IXQCCMP-NEXT:    #NO_APP
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 112
+;
+; RV64IXQCCMP-WITH-FP-LABEL: bar:
+; RV64IXQCCMP-WITH-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -112
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 112
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-WITH-FP-NEXT:    #APP
+; RV64IXQCCMP-WITH-FP-NEXT:    li s11, 0
+; RV64IXQCCMP-WITH-FP-NEXT:    #NO_APP
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 112
+; RV64IXQCCMP-WITH-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 112
+entry:
+  tail call void asm sideeffect "li s11, 0", "~{s11}"()
+  ret void
+}
+
+define void @varargs(...) {
+; RV32IXQCCMP-LABEL: varargs:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    addi sp, sp, -48
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -36
+; RV32IXQCCMP-NEXT:    sw a4, 32(sp)
+; RV32IXQCCMP-NEXT:    sw a5, 36(sp)
+; RV32IXQCCMP-NEXT:    sw a6, 40(sp)
+; RV32IXQCCMP-NEXT:    sw a7, 44(sp)
+; RV32IXQCCMP-NEXT:    sw a0, 16(sp)
+; RV32IXQCCMP-NEXT:    sw a1, 20(sp)
+; RV32IXQCCMP-NEXT:    sw a2, 24(sp)
+; RV32IXQCCMP-NEXT:    sw a3, 28(sp)
+; RV32IXQCCMP-NEXT:    call callee
+; RV32IXQCCMP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-NEXT:    addi sp, sp, 48
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-NEXT:    ret
+;
+; RV32IXQCCMP-WITH-FP-LABEL: varargs:
+; RV32IXQCCMP-WITH-FP:       # %bb.0:
+; RV32IXQCCMP-WITH-FP-NEXT:    addi sp, sp, -48
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-WITH-FP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -36
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -40
+; RV32IXQCCMP-WITH-FP-NEXT:    addi s0, sp, 16
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 32
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a4, 16(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a5, 20(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a6, 24(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a7, 28(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a0, 0(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a1, 4(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a2, 8(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    sw a3, 12(s0)
+; RV32IXQCCMP-WITH-FP-NEXT:    call callee
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 48
+; RV32IXQCCMP-WITH-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-WITH-FP-NEXT:    addi sp, sp, 48
+; RV32IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-WITH-FP-NEXT:    ret
+;
+; RV64IXQCCMP-LABEL: varargs:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    addi sp, sp, -80
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 80
+; RV64IXQCCMP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -72
+; RV64IXQCCMP-NEXT:    sd a4, 48(sp)
+; RV64IXQCCMP-NEXT:    sd a5, 56(sp)
+; RV64IXQCCMP-NEXT:    sd a6, 64(sp)
+; RV64IXQCCMP-NEXT:    sd a7, 72(sp)
+; RV64IXQCCMP-NEXT:    sd a0, 16(sp)
+; RV64IXQCCMP-NEXT:    sd a1, 24(sp)
+; RV64IXQCCMP-NEXT:    sd a2, 32(sp)
+; RV64IXQCCMP-NEXT:    sd a3, 40(sp)
+; RV64IXQCCMP-NEXT:    call callee
+; RV64IXQCCMP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-NEXT:    addi sp, sp, 80
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-NEXT:    ret
+;
+; RV64IXQCCMP-WITH-FP-LABEL: varargs:
+; RV64IXQCCMP-WITH-FP:       # %bb.0:
+; RV64IXQCCMP-WITH-FP-NEXT:    addi sp, sp, -80
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 80
+; RV64IXQCCMP-WITH-FP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset ra, -72
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_offset s0, -80
+; RV64IXQCCMP-WITH-FP-NEXT:    addi s0, sp, 16
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa s0, 64
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a4, 32(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a5, 40(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a6, 48(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a7, 56(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a0, 0(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a1, 8(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a2, 16(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    sd a3, 24(s0)
+; RV64IXQCCMP-WITH-FP-NEXT:    call callee
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa sp, 80
+; RV64IXQCCMP-WITH-FP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-WITH-FP-NEXT:    addi sp, sp, 80
+; RV64IXQCCMP-WITH-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-WITH-FP-NEXT:    ret
+  call void @callee()
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/xqccmp-cm-popretz.mir b/llvm/test/CodeGen/RISCV/xqccmp-cm-popretz.mir
new file mode 100644
index 0000000000000..482e6ff7b62af
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqccmp-cm-popretz.mir
@@ -0,0 +1,66 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -x mir -start-before=prologepilog -stop-after=riscv-push-pop-opt -verify-machineinstrs -o - %s \
+# RUN: | FileCheck -check-prefixes=CHECK-XQCCMP32 %s
+# RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -x mir -start-before=prologepilog -stop-after=riscv-push-pop-opt -verify-machineinstrs -o - %s \
+# RUN: | FileCheck -check-prefixes=CHECK-XQCCMP64 %s
+---
+name: popret_rvlist5
+tracksRegLiveness: true
+body:                   |
+  bb.0:
+    ; CHECK-XQCCMP32-LABEL: name: popret_rvlist5
+    ; CHECK-XQCCMP32: liveins: $x1, $x8
+    ; CHECK-XQCCMP32-NEXT: {{  $}}
+    ; CHECK-XQCCMP32-NEXT: frame-setup QC_CM_PUSH 5, 0, implicit-def $x2, implicit $x2, implicit $x1, implicit $x8
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -4
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
+    ; CHECK-XQCCMP32-NEXT: $x1 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: frame-destroy QC_CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ;
+    ; CHECK-XQCCMP64-LABEL: name: popret_rvlist5
+    ; CHECK-XQCCMP64: liveins: $x1, $x8
+    ; CHECK-XQCCMP64-NEXT: {{  $}}
+    ; CHECK-XQCCMP64-NEXT: frame-setup QC_CM_PUSH 5, 0, implicit-def $x2, implicit $x2, implicit $x1, implicit $x8
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -8
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -16
+    ; CHECK-XQCCMP64-NEXT: $x1 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: frame-destroy QC_CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    $x1 = IMPLICIT_DEF
+    $x8 = IMPLICIT_DEF
+    PseudoRET
+...
+---
+name: popretz_rvlist5
+tracksRegLiveness: true
+body:                   |
+  bb.0:
+    ; CHECK-XQCCMP32-LABEL: name: popretz_rvlist5
+    ; CHECK-XQCCMP32: liveins: $x1, $x8
+    ; CHECK-XQCCMP32-NEXT: {{  $}}
+    ; CHECK-XQCCMP32-NEXT: frame-setup QC_CM_PUSH 5, 0, implicit-def $x2, implicit $x2, implicit $x1, implicit $x8
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -4
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
+    ; CHECK-XQCCMP32-NEXT: $x1 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: frame-destroy QC_CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
+    ;
+    ; CHECK-XQCCMP64-LABEL: name: popretz_rvlist5
+    ; CHECK-XQCCMP64: liveins: $x1, $x8
+    ; CHECK-XQCCMP64-NEXT: {{  $}}
+    ; CHECK-XQCCMP64-NEXT: frame-setup QC_CM_PUSH 5, 0, implicit-def $x2, implicit $x2, implicit $x1, implicit $x8
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -8
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -16
+    ; CHECK-XQCCMP64-NEXT: $x1 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: frame-destroy QC_CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
+    $x1 = IMPLICIT_DEF
+    $x8 = IMPLICIT_DEF
+    $x10 = COPY $x0
+    PseudoRET implicit $x10
+...

diff  --git a/llvm/test/CodeGen/RISCV/xqccmp-cm-push-pop.mir b/llvm/test/CodeGen/RISCV/xqccmp-cm-push-pop.mir
new file mode 100644
index 0000000000000..9dd4e86dcb0dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqccmp-cm-push-pop.mir
@@ -0,0 +1,92 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -x mir -run-pass=prologepilog -verify-machineinstrs -o - %s \
+# RUN: | FileCheck -check-prefixes=CHECK-XQCCMP32 %s
+# RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -x mir -run-pass=prologepilog -verify-machineinstrs -o - %s \
+# RUN: | FileCheck -check-prefixes=CHECK-XQCCMP64 %s
+---
+name: push_rvlist15
+tracksRegLiveness: true
+body:                   |
+  bb.0:
+    ; CHECK-XQCCMP32-LABEL: name: push_rvlist15
+    ; CHECK-XQCCMP32: liveins: $x1, $x8, $x9, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27
+    ; CHECK-XQCCMP32-NEXT: {{  $}}
+    ; CHECK-XQCCMP32-NEXT: frame-setup QC_CM_PUSH 15, 0, implicit-def $x2, implicit $x2, implicit $x1, implicit $x8, implicit $x9, implicit $x18, implicit $x19, implicit $x20, implicit $x21, implicit $x22, implicit $x23, implicit $x24, implicit $x25, implicit $x26, implicit $x27
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 64
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -4
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x9, -12
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x18, -16
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x19, -20
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x20, -24
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x21, -28
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x22, -32
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x23, -36
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x24, -40
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x25, -44
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x26, -48
+    ; CHECK-XQCCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x27, -52
+    ; CHECK-XQCCMP32-NEXT: $x1 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x9 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x18 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x19 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x20 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x21 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x22 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x23 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x24 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x25 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x26 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: $x27 = IMPLICIT_DEF
+    ; CHECK-XQCCMP32-NEXT: frame-destroy QC_CM_POP 15, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8, implicit-def $x9, implicit-def $x18, implicit-def $x19, implicit-def $x20, implicit-def $x21, implicit-def $x22, implicit-def $x23, implicit-def $x24, implicit-def $x25, implicit-def $x26, implicit-def $x27
+    ; CHECK-XQCCMP32-NEXT: PseudoRET
+    ;
+    ; CHECK-XQCCMP64-LABEL: name: push_rvlist15
+    ; CHECK-XQCCMP64: liveins: $x1, $x8, $x9, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27
+    ; CHECK-XQCCMP64-NEXT: {{  $}}
+    ; CHECK-XQCCMP64-NEXT: frame-setup QC_CM_PUSH 15, 0, implicit-def $x2, implicit $x2, implicit $x1, implicit $x8, implicit $x9, implicit $x18, implicit $x19, implicit $x20, implicit $x21, implicit $x22, implicit $x23, implicit $x24, implicit $x25, implicit $x26, implicit $x27
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 112
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -8
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -16
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x9, -24
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x18, -32
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x19, -40
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x20, -48
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x21, -56
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x22, -64
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x23, -72
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x24, -80
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x25, -88
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x26, -96
+    ; CHECK-XQCCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x27, -104
+    ; CHECK-XQCCMP64-NEXT: $x1 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x8 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x9 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x18 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x19 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x20 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x21 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x22 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x23 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x24 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x25 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x26 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: $x27 = IMPLICIT_DEF
+    ; CHECK-XQCCMP64-NEXT: frame-destroy QC_CM_POP 15, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8, implicit-def $x9, implicit-def $x18, implicit-def $x19, implicit-def $x20, implicit-def $x21, implicit-def $x22, implicit-def $x23, implicit-def $x24, implicit-def $x25, implicit-def $x26, implicit-def $x27
+    ; CHECK-XQCCMP64-NEXT: PseudoRET
+    $x1 = IMPLICIT_DEF
+    $x8 = IMPLICIT_DEF
+    $x9 = IMPLICIT_DEF
+    $x18 = IMPLICIT_DEF
+    $x19 = IMPLICIT_DEF
+    $x20 = IMPLICIT_DEF
+    $x21 = IMPLICIT_DEF
+    $x22 = IMPLICIT_DEF
+    $x23 = IMPLICIT_DEF
+    $x24 = IMPLICIT_DEF
+    $x25 = IMPLICIT_DEF
+    $x26 = IMPLICIT_DEF
+    $x27 = IMPLICIT_DEF
+    PseudoRET
+...

diff  --git a/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll
new file mode 100644
index 0000000000000..c1a5e6093f1c5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll
@@ -0,0 +1,3951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32IXQCCMP
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64IXQCCMP
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -frame-pointer=all \
+; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV32IXQCCMP-FP
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -frame-pointer=all \
+; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV64IXQCCMP-FP
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp,+save-restore \
+; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV32IXQCCMP-SR
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp,+save-restore \
+; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV64IXQCCMP-SR
+
+declare void @test(ptr)
+declare void @callee_void(ptr)
+declare i32 @callee(ptr)
+
+define i32 @foo() {
+; RV32IXQCCMP-LABEL: foo:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra}, -64
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    addi sp, sp, -464
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 528
+; RV32IXQCCMP-NEXT:    addi a0, sp, 12
+; RV32IXQCCMP-NEXT:    call test
+; RV32IXQCCMP-NEXT:    addi sp, sp, 464
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-NEXT:    qc.cm.popretz {ra}, 64
+;
+; RV64IXQCCMP-LABEL: foo:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra}, -64
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    addi sp, sp, -464
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 528
+; RV64IXQCCMP-NEXT:    addi a0, sp, 8
+; RV64IXQCCMP-NEXT:    call test
+; RV64IXQCCMP-NEXT:    addi sp, sp, 464
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-NEXT:    qc.cm.popretz {ra}, 64
+;
+; RV32IXQCCMP-FP-LABEL: foo:
+; RV32IXQCCMP-FP:       # %bb.0:
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -64
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, -464
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 528
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    addi a0, s0, -520
+; RV32IXQCCMP-FP-NEXT:    call test
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 528
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, 464
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popretz {ra, s0}, 64
+;
+; RV64IXQCCMP-FP-LABEL: foo:
+; RV64IXQCCMP-FP:       # %bb.0:
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -64
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, -464
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 528
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    addi a0, s0, -528
+; RV64IXQCCMP-FP-NEXT:    call test
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 528
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, 464
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popretz {ra, s0}, 64
+;
+; RV32IXQCCMP-SR-LABEL: foo:
+; RV32IXQCCMP-SR:       # %bb.0:
+; RV32IXQCCMP-SR-NEXT:    qc.cm.push {ra}, -64
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    addi sp, sp, -464
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 528
+; RV32IXQCCMP-SR-NEXT:    addi a0, sp, 12
+; RV32IXQCCMP-SR-NEXT:    call test
+; RV32IXQCCMP-SR-NEXT:    addi sp, sp, 464
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popretz {ra}, 64
+;
+; RV64IXQCCMP-SR-LABEL: foo:
+; RV64IXQCCMP-SR:       # %bb.0:
+; RV64IXQCCMP-SR-NEXT:    qc.cm.push {ra}, -64
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, -464
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 528
+; RV64IXQCCMP-SR-NEXT:    addi a0, sp, 8
+; RV64IXQCCMP-SR-NEXT:    call test
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, 464
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popretz {ra}, 64
+  %1 = alloca [512 x i8]
+  %2 = getelementptr [512 x i8], ptr %1, i32 0, i32 0
+  call void @test(ptr %2)
+  ret i32 0
+}
+
+define i32 @pushpopret0(i32 signext %size) {
+; RV32IXQCCMP-LABEL: pushpopret0:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-NEXT:    mv sp, a0
+; RV32IXQCCMP-NEXT:    call callee_void
+; RV32IXQCCMP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-NEXT:    qc.cm.popretz {ra, s0}, 16
+;
+; RV64IXQCCMP-LABEL: pushpopret0:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-NEXT:    mv sp, a0
+; RV64IXQCCMP-NEXT:    call callee_void
+; RV64IXQCCMP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-NEXT:    qc.cm.popretz {ra, s0}, 16
+;
+; RV32IXQCCMP-FP-LABEL: pushpopret0:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-FP-NEXT:    mv sp, a0
+; RV32IXQCCMP-FP-NEXT:    call callee_void
+; RV32IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popretz {ra, s0}, 16
+;
+; RV64IXQCCMP-FP-LABEL: pushpopret0:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-FP-NEXT:    mv sp, a0
+; RV64IXQCCMP-FP-NEXT:    call callee_void
+; RV64IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popretz {ra, s0}, 16
+;
+; RV32IXQCCMP-SR-LABEL: pushpopret0:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-SR-NEXT:    mv sp, a0
+; RV32IXQCCMP-SR-NEXT:    call callee_void
+; RV32IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popretz {ra, s0}, 16
+;
+; RV64IXQCCMP-SR-LABEL: pushpopret0:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-SR-NEXT:    mv sp, a0
+; RV64IXQCCMP-SR-NEXT:    call callee_void
+; RV64IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popretz {ra, s0}, 16
+entry:
+  %0 = alloca i8, i32 %size, align 16
+  call void @callee_void(ptr nonnull %0)
+  ret i32 0
+}
+
+; pushpopret1: a function with a variable-sized, 16-byte-aligned alloca.  The
+; dynamic stack adjustment forces a frame pointer, so the prologue must use
+; qc.cm.pushfp (Xqccmp push that also writes s0) and the epilogue restores sp
+; from s0 before folding the return into qc.cm.popret.  The CHECK lines below
+; are autogenerated (update_llvm_test_checks.py); do not edit them by hand.
+define i32 @pushpopret1(i32 signext %size) {
+; RV32IXQCCMP-LABEL: pushpopret1:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-NEXT:    mv sp, a0
+; RV32IXQCCMP-NEXT:    call callee_void
+; RV32IXQCCMP-NEXT:    li a0, 1
+; RV32IXQCCMP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-LABEL: pushpopret1:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-NEXT:    mv sp, a0
+; RV64IXQCCMP-NEXT:    call callee_void
+; RV64IXQCCMP-NEXT:    li a0, 1
+; RV64IXQCCMP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV32IXQCCMP-FP-LABEL: pushpopret1:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-FP-NEXT:    mv sp, a0
+; RV32IXQCCMP-FP-NEXT:    call callee_void
+; RV32IXQCCMP-FP-NEXT:    li a0, 1
+; RV32IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-FP-LABEL: pushpopret1:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-FP-NEXT:    mv sp, a0
+; RV64IXQCCMP-FP-NEXT:    call callee_void
+; RV64IXQCCMP-FP-NEXT:    li a0, 1
+; RV64IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV32IXQCCMP-SR-LABEL: pushpopret1:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-SR-NEXT:    mv sp, a0
+; RV32IXQCCMP-SR-NEXT:    call callee_void
+; RV32IXQCCMP-SR-NEXT:    li a0, 1
+; RV32IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-SR-LABEL: pushpopret1:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-SR-NEXT:    mv sp, a0
+; RV64IXQCCMP-SR-NEXT:    call callee_void
+; RV64IXQCCMP-SR-NEXT:    li a0, 1
+; RV64IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0}, 16
+entry:
+  ; Dynamically-sized alloca: the amount comes from %size, so sp moves by a
+  ; runtime value and the epilogue cannot simply re-add a constant.
+  %0 = alloca i8, i32 %size, align 16
+  call void @callee_void(ptr nonnull %0)
+  ret i32 1
+}
+
+; pushpopretneg1: same frame shape as pushpopret1 but returning -1.  The -1
+; return cannot use the popretz encoding (which zeroes a0), so codegen must
+; emit `li a0, -1` followed by plain qc.cm.popret.  CHECK lines autogenerated;
+; regenerate with update_llvm_test_checks.py instead of hand-editing.
+define i32 @pushpopretneg1(i32 signext %size) {
+; RV32IXQCCMP-LABEL: pushpopretneg1:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-NEXT:    mv sp, a0
+; RV32IXQCCMP-NEXT:    call callee_void
+; RV32IXQCCMP-NEXT:    li a0, -1
+; RV32IXQCCMP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-LABEL: pushpopretneg1:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-NEXT:    mv sp, a0
+; RV64IXQCCMP-NEXT:    call callee_void
+; RV64IXQCCMP-NEXT:    li a0, -1
+; RV64IXQCCMP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV32IXQCCMP-FP-LABEL: pushpopretneg1:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-FP-NEXT:    mv sp, a0
+; RV32IXQCCMP-FP-NEXT:    call callee_void
+; RV32IXQCCMP-FP-NEXT:    li a0, -1
+; RV32IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-FP-LABEL: pushpopretneg1:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-FP-NEXT:    mv sp, a0
+; RV64IXQCCMP-FP-NEXT:    call callee_void
+; RV64IXQCCMP-FP-NEXT:    li a0, -1
+; RV64IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV32IXQCCMP-SR-LABEL: pushpopretneg1:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-SR-NEXT:    mv sp, a0
+; RV32IXQCCMP-SR-NEXT:    call callee_void
+; RV32IXQCCMP-SR-NEXT:    li a0, -1
+; RV32IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-SR-LABEL: pushpopretneg1:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-SR-NEXT:    mv sp, a0
+; RV64IXQCCMP-SR-NEXT:    call callee_void
+; RV64IXQCCMP-SR-NEXT:    li a0, -1
+; RV64IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0}, 16
+entry:
+  ; Runtime-sized alloca keeps the frame pointer live through the call.
+  %0 = alloca i8, i32 %size, align 16
+  call void @callee_void(ptr nonnull %0)
+  ret i32 -1
+}
+
+; pushpopret2: returns 2 — like the -1 case, a non-zero return value, so the
+; Push/Pop optimizer must keep `li a0, 2` + qc.cm.popret rather than forming
+; qc.cm.popretz.  CHECK lines autogenerated by update_llvm_test_checks.py.
+define i32 @pushpopret2(i32 signext %size) {
+; RV32IXQCCMP-LABEL: pushpopret2:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-NEXT:    mv sp, a0
+; RV32IXQCCMP-NEXT:    call callee_void
+; RV32IXQCCMP-NEXT:    li a0, 2
+; RV32IXQCCMP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-LABEL: pushpopret2:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-NEXT:    mv sp, a0
+; RV64IXQCCMP-NEXT:    call callee_void
+; RV64IXQCCMP-NEXT:    li a0, 2
+; RV64IXQCCMP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV32IXQCCMP-FP-LABEL: pushpopret2:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-FP-NEXT:    mv sp, a0
+; RV32IXQCCMP-FP-NEXT:    call callee_void
+; RV32IXQCCMP-FP-NEXT:    li a0, 2
+; RV32IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-FP-LABEL: pushpopret2:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-FP-NEXT:    mv sp, a0
+; RV64IXQCCMP-FP-NEXT:    call callee_void
+; RV64IXQCCMP-FP-NEXT:    li a0, 2
+; RV64IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV32IXQCCMP-SR-LABEL: pushpopret2:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-SR-NEXT:    mv sp, a0
+; RV32IXQCCMP-SR-NEXT:    call callee_void
+; RV32IXQCCMP-SR-NEXT:    li a0, 2
+; RV32IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-SR-LABEL: pushpopret2:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-SR-NEXT:    mv sp, a0
+; RV64IXQCCMP-SR-NEXT:    call callee_void
+; RV64IXQCCMP-SR-NEXT:    li a0, 2
+; RV64IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0}, 16
+entry:
+  ; Runtime-sized alloca keeps the frame pointer live through the call.
+  %0 = alloca i8, i32 %size, align 16
+  call void @callee_void(ptr nonnull %0)
+  ret i32 2
+}
+
+; tailcall: the function ends in a tail call, so the epilogue cannot use
+; qc.cm.popret (which returns).  The checks verify a plain qc.cm.pop followed
+; by the emitted `.cfi_restore`/`.cfi_def_cfa_offset 0` sequence and the
+; `tail callee` jump.  CHECK lines autogenerated by update_llvm_test_checks.py.
+define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
+; RV32IXQCCMP-LABEL: tailcall:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-NEXT:    mv sp, a0
+; RV32IXQCCMP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-NEXT:    qc.cm.pop {ra, s0}, 16
+; RV32IXQCCMP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-NEXT:    tail callee
+;
+; RV64IXQCCMP-LABEL: tailcall:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-NEXT:    mv sp, a0
+; RV64IXQCCMP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-NEXT:    qc.cm.pop {ra, s0}, 16
+; RV64IXQCCMP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-NEXT:    tail callee
+;
+; RV32IXQCCMP-FP-LABEL: tailcall:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-FP-NEXT:    mv sp, a0
+; RV32IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0}, 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-FP-NEXT:    tail callee
+;
+; RV64IXQCCMP-FP-LABEL: tailcall:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-FP-NEXT:    mv sp, a0
+; RV64IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0}, 16
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-FP-NEXT:    tail callee
+;
+; RV32IXQCCMP-SR-LABEL: tailcall:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-SR-NEXT:    mv sp, a0
+; RV32IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pop {ra, s0}, 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-SR-NEXT:    tail callee
+;
+; RV64IXQCCMP-SR-LABEL: tailcall:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-SR-NEXT:    mv sp, a0
+; RV64IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pop {ra, s0}, 16
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-SR-NEXT:    tail callee
+entry:
+  ; The alloca forces a frame; the tail call means the epilogue must fully
+  ; unwind the frame (pop, not popret) before transferring control.
+  %0 = alloca i8, i32 %size, align 16
+  %1 = tail call i32 @callee(ptr nonnull %0)
+  ret i32 %1
+}
+
+@var = global [5 x i32] zeroinitializer
+define i32 @nocompress(i32 signext %size) {
+; RV32IXQCCMP-LABEL: nocompress:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0-s8}, -48
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-NEXT:    sub s2, sp, a0
+; RV32IXQCCMP-NEXT:    mv sp, s2
+; RV32IXQCCMP-NEXT:    lui s1, %hi(var)
+; RV32IXQCCMP-NEXT:    lw s3, %lo(var)(s1)
+; RV32IXQCCMP-NEXT:    lw s4, %lo(var+4)(s1)
+; RV32IXQCCMP-NEXT:    lw s5, %lo(var+8)(s1)
+; RV32IXQCCMP-NEXT:    lw s6, %lo(var+12)(s1)
+; RV32IXQCCMP-NEXT:    addi s7, s1, %lo(var)
+; RV32IXQCCMP-NEXT:    lw s8, 16(s7)
+; RV32IXQCCMP-NEXT:    mv a0, s2
+; RV32IXQCCMP-NEXT:    call callee_void
+; RV32IXQCCMP-NEXT:    sw s8, 16(s7)
+; RV32IXQCCMP-NEXT:    sw s6, %lo(var+12)(s1)
+; RV32IXQCCMP-NEXT:    sw s5, %lo(var+8)(s1)
+; RV32IXQCCMP-NEXT:    sw s4, %lo(var+4)(s1)
+; RV32IXQCCMP-NEXT:    sw s3, %lo(var)(s1)
+; RV32IXQCCMP-NEXT:    mv a0, s2
+; RV32IXQCCMP-NEXT:    addi sp, s0, -48
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 48
+; RV32IXQCCMP-NEXT:    qc.cm.pop {ra, s0-s8}, 48
+; RV32IXQCCMP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-NEXT:    .cfi_restore s1
+; RV32IXQCCMP-NEXT:    .cfi_restore s2
+; RV32IXQCCMP-NEXT:    .cfi_restore s3
+; RV32IXQCCMP-NEXT:    .cfi_restore s4
+; RV32IXQCCMP-NEXT:    .cfi_restore s5
+; RV32IXQCCMP-NEXT:    .cfi_restore s6
+; RV32IXQCCMP-NEXT:    .cfi_restore s7
+; RV32IXQCCMP-NEXT:    .cfi_restore s8
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-NEXT:    tail callee
+;
+; RV64IXQCCMP-LABEL: nocompress:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0-s8}, -80
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 80
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-NEXT:    sub s2, sp, a0
+; RV64IXQCCMP-NEXT:    mv sp, s2
+; RV64IXQCCMP-NEXT:    lui s1, %hi(var)
+; RV64IXQCCMP-NEXT:    lw s3, %lo(var)(s1)
+; RV64IXQCCMP-NEXT:    lw s4, %lo(var+4)(s1)
+; RV64IXQCCMP-NEXT:    lw s5, %lo(var+8)(s1)
+; RV64IXQCCMP-NEXT:    lw s6, %lo(var+12)(s1)
+; RV64IXQCCMP-NEXT:    addi s7, s1, %lo(var)
+; RV64IXQCCMP-NEXT:    lw s8, 16(s7)
+; RV64IXQCCMP-NEXT:    mv a0, s2
+; RV64IXQCCMP-NEXT:    call callee_void
+; RV64IXQCCMP-NEXT:    sw s8, 16(s7)
+; RV64IXQCCMP-NEXT:    sw s6, %lo(var+12)(s1)
+; RV64IXQCCMP-NEXT:    sw s5, %lo(var+8)(s1)
+; RV64IXQCCMP-NEXT:    sw s4, %lo(var+4)(s1)
+; RV64IXQCCMP-NEXT:    sw s3, %lo(var)(s1)
+; RV64IXQCCMP-NEXT:    mv a0, s2
+; RV64IXQCCMP-NEXT:    addi sp, s0, -80
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 80
+; RV64IXQCCMP-NEXT:    qc.cm.pop {ra, s0-s8}, 80
+; RV64IXQCCMP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-NEXT:    .cfi_restore s1
+; RV64IXQCCMP-NEXT:    .cfi_restore s2
+; RV64IXQCCMP-NEXT:    .cfi_restore s3
+; RV64IXQCCMP-NEXT:    .cfi_restore s4
+; RV64IXQCCMP-NEXT:    .cfi_restore s5
+; RV64IXQCCMP-NEXT:    .cfi_restore s6
+; RV64IXQCCMP-NEXT:    .cfi_restore s7
+; RV64IXQCCMP-NEXT:    .cfi_restore s8
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-NEXT:    tail callee
+;
+; RV32IXQCCMP-FP-LABEL: nocompress:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s8}, -48
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-FP-NEXT:    sub s2, sp, a0
+; RV32IXQCCMP-FP-NEXT:    mv sp, s2
+; RV32IXQCCMP-FP-NEXT:    lui s1, %hi(var)
+; RV32IXQCCMP-FP-NEXT:    lw s3, %lo(var)(s1)
+; RV32IXQCCMP-FP-NEXT:    lw s4, %lo(var+4)(s1)
+; RV32IXQCCMP-FP-NEXT:    lw s5, %lo(var+8)(s1)
+; RV32IXQCCMP-FP-NEXT:    lw s6, %lo(var+12)(s1)
+; RV32IXQCCMP-FP-NEXT:    addi s7, s1, %lo(var)
+; RV32IXQCCMP-FP-NEXT:    lw s8, 16(s7)
+; RV32IXQCCMP-FP-NEXT:    mv a0, s2
+; RV32IXQCCMP-FP-NEXT:    call callee_void
+; RV32IXQCCMP-FP-NEXT:    sw s8, 16(s7)
+; RV32IXQCCMP-FP-NEXT:    sw s6, %lo(var+12)(s1)
+; RV32IXQCCMP-FP-NEXT:    sw s5, %lo(var+8)(s1)
+; RV32IXQCCMP-FP-NEXT:    sw s4, %lo(var+4)(s1)
+; RV32IXQCCMP-FP-NEXT:    sw s3, %lo(var)(s1)
+; RV32IXQCCMP-FP-NEXT:    mv a0, s2
+; RV32IXQCCMP-FP-NEXT:    addi sp, s0, -48
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 48
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0-s8}, 48
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s1
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s2
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s3
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s4
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s5
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s6
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s7
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s8
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-FP-NEXT:    tail callee
+;
+; RV64IXQCCMP-FP-LABEL: nocompress:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s8}, -80
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 80
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-FP-NEXT:    sub s2, sp, a0
+; RV64IXQCCMP-FP-NEXT:    mv sp, s2
+; RV64IXQCCMP-FP-NEXT:    lui s1, %hi(var)
+; RV64IXQCCMP-FP-NEXT:    lw s3, %lo(var)(s1)
+; RV64IXQCCMP-FP-NEXT:    lw s4, %lo(var+4)(s1)
+; RV64IXQCCMP-FP-NEXT:    lw s5, %lo(var+8)(s1)
+; RV64IXQCCMP-FP-NEXT:    lw s6, %lo(var+12)(s1)
+; RV64IXQCCMP-FP-NEXT:    addi s7, s1, %lo(var)
+; RV64IXQCCMP-FP-NEXT:    lw s8, 16(s7)
+; RV64IXQCCMP-FP-NEXT:    mv a0, s2
+; RV64IXQCCMP-FP-NEXT:    call callee_void
+; RV64IXQCCMP-FP-NEXT:    sw s8, 16(s7)
+; RV64IXQCCMP-FP-NEXT:    sw s6, %lo(var+12)(s1)
+; RV64IXQCCMP-FP-NEXT:    sw s5, %lo(var+8)(s1)
+; RV64IXQCCMP-FP-NEXT:    sw s4, %lo(var+4)(s1)
+; RV64IXQCCMP-FP-NEXT:    sw s3, %lo(var)(s1)
+; RV64IXQCCMP-FP-NEXT:    mv a0, s2
+; RV64IXQCCMP-FP-NEXT:    addi sp, s0, -80
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 80
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0-s8}, 80
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s1
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s2
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s3
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s4
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s5
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s6
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s7
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s8
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-FP-NEXT:    tail callee
+;
+; RV32IXQCCMP-SR-LABEL: nocompress:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0-s8}, -48
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-SR-NEXT:    sub s2, sp, a0
+; RV32IXQCCMP-SR-NEXT:    mv sp, s2
+; RV32IXQCCMP-SR-NEXT:    lui s1, %hi(var)
+; RV32IXQCCMP-SR-NEXT:    lw s3, %lo(var)(s1)
+; RV32IXQCCMP-SR-NEXT:    lw s4, %lo(var+4)(s1)
+; RV32IXQCCMP-SR-NEXT:    lw s5, %lo(var+8)(s1)
+; RV32IXQCCMP-SR-NEXT:    lw s6, %lo(var+12)(s1)
+; RV32IXQCCMP-SR-NEXT:    addi s7, s1, %lo(var)
+; RV32IXQCCMP-SR-NEXT:    lw s8, 16(s7)
+; RV32IXQCCMP-SR-NEXT:    mv a0, s2
+; RV32IXQCCMP-SR-NEXT:    call callee_void
+; RV32IXQCCMP-SR-NEXT:    sw s8, 16(s7)
+; RV32IXQCCMP-SR-NEXT:    sw s6, %lo(var+12)(s1)
+; RV32IXQCCMP-SR-NEXT:    sw s5, %lo(var+8)(s1)
+; RV32IXQCCMP-SR-NEXT:    sw s4, %lo(var+4)(s1)
+; RV32IXQCCMP-SR-NEXT:    sw s3, %lo(var)(s1)
+; RV32IXQCCMP-SR-NEXT:    mv a0, s2
+; RV32IXQCCMP-SR-NEXT:    addi sp, s0, -48
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 48
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pop {ra, s0-s8}, 48
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s1
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s2
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s3
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s4
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s5
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s6
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s7
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s8
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-SR-NEXT:    tail callee
+;
+; RV64IXQCCMP-SR-LABEL: nocompress:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0-s8}, -80
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 80
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-SR-NEXT:    sub s2, sp, a0
+; RV64IXQCCMP-SR-NEXT:    mv sp, s2
+; RV64IXQCCMP-SR-NEXT:    lui s1, %hi(var)
+; RV64IXQCCMP-SR-NEXT:    lw s3, %lo(var)(s1)
+; RV64IXQCCMP-SR-NEXT:    lw s4, %lo(var+4)(s1)
+; RV64IXQCCMP-SR-NEXT:    lw s5, %lo(var+8)(s1)
+; RV64IXQCCMP-SR-NEXT:    lw s6, %lo(var+12)(s1)
+; RV64IXQCCMP-SR-NEXT:    addi s7, s1, %lo(var)
+; RV64IXQCCMP-SR-NEXT:    lw s8, 16(s7)
+; RV64IXQCCMP-SR-NEXT:    mv a0, s2
+; RV64IXQCCMP-SR-NEXT:    call callee_void
+; RV64IXQCCMP-SR-NEXT:    sw s8, 16(s7)
+; RV64IXQCCMP-SR-NEXT:    sw s6, %lo(var+12)(s1)
+; RV64IXQCCMP-SR-NEXT:    sw s5, %lo(var+8)(s1)
+; RV64IXQCCMP-SR-NEXT:    sw s4, %lo(var+4)(s1)
+; RV64IXQCCMP-SR-NEXT:    sw s3, %lo(var)(s1)
+; RV64IXQCCMP-SR-NEXT:    mv a0, s2
+; RV64IXQCCMP-SR-NEXT:    addi sp, s0, -80
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 80
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pop {ra, s0-s8}, 80
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s1
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s2
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s3
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s4
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s5
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s6
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s7
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s8
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-SR-NEXT:    tail callee
+entry:
+  %0 = alloca i8, i32 %size, align 16
+  %val = load [5 x i32], ptr @var
+  call void @callee_void(ptr nonnull %0)
+  store volatile [5 x i32] %val, ptr @var
+  %1 = tail call i32 @callee(ptr nonnull %0)
+  ret i32 %1
+}
+
+; Check that functions with varargs do not use save/restore code
+
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+
+define i32 @varargs(ptr %fmt, ...) {
+; RV32IXQCCMP-LABEL: varargs:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    addi sp, sp, -48
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-NEXT:    mv a0, a1
+; RV32IXQCCMP-NEXT:    sw a5, 36(sp)
+; RV32IXQCCMP-NEXT:    sw a6, 40(sp)
+; RV32IXQCCMP-NEXT:    sw a7, 44(sp)
+; RV32IXQCCMP-NEXT:    sw a1, 20(sp)
+; RV32IXQCCMP-NEXT:    sw a2, 24(sp)
+; RV32IXQCCMP-NEXT:    sw a3, 28(sp)
+; RV32IXQCCMP-NEXT:    sw a4, 32(sp)
+; RV32IXQCCMP-NEXT:    addi a1, sp, 24
+; RV32IXQCCMP-NEXT:    sw a1, 12(sp)
+; RV32IXQCCMP-NEXT:    addi sp, sp, 48
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-NEXT:    ret
+;
+; RV64IXQCCMP-LABEL: varargs:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    addi sp, sp, -80
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 80
+; RV64IXQCCMP-NEXT:    sd a1, 24(sp)
+; RV64IXQCCMP-NEXT:    addi a0, sp, 28
+; RV64IXQCCMP-NEXT:    sd a0, 8(sp)
+; RV64IXQCCMP-NEXT:    lw a0, 24(sp)
+; RV64IXQCCMP-NEXT:    sd a5, 56(sp)
+; RV64IXQCCMP-NEXT:    sd a6, 64(sp)
+; RV64IXQCCMP-NEXT:    sd a7, 72(sp)
+; RV64IXQCCMP-NEXT:    sd a2, 32(sp)
+; RV64IXQCCMP-NEXT:    sd a3, 40(sp)
+; RV64IXQCCMP-NEXT:    sd a4, 48(sp)
+; RV64IXQCCMP-NEXT:    addi sp, sp, 80
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-NEXT:    ret
+;
+; RV32IXQCCMP-FP-LABEL: varargs:
+; RV32IXQCCMP-FP:       # %bb.0:
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, -48
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-FP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -36
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -40
+; RV32IXQCCMP-FP-NEXT:    addi s0, sp, 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 32
+; RV32IXQCCMP-FP-NEXT:    mv a0, a1
+; RV32IXQCCMP-FP-NEXT:    sw a5, 20(s0)
+; RV32IXQCCMP-FP-NEXT:    sw a6, 24(s0)
+; RV32IXQCCMP-FP-NEXT:    sw a7, 28(s0)
+; RV32IXQCCMP-FP-NEXT:    sw a1, 4(s0)
+; RV32IXQCCMP-FP-NEXT:    sw a2, 8(s0)
+; RV32IXQCCMP-FP-NEXT:    sw a3, 12(s0)
+; RV32IXQCCMP-FP-NEXT:    sw a4, 16(s0)
+; RV32IXQCCMP-FP-NEXT:    addi a1, s0, 8
+; RV32IXQCCMP-FP-NEXT:    sw a1, -12(s0)
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 48
+; RV32IXQCCMP-FP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, 48
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-FP-NEXT:    ret
+;
+; RV64IXQCCMP-FP-LABEL: varargs:
+; RV64IXQCCMP-FP:       # %bb.0:
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, -96
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 96
+; RV64IXQCCMP-FP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -72
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -80
+; RV64IXQCCMP-FP-NEXT:    addi s0, sp, 32
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 64
+; RV64IXQCCMP-FP-NEXT:    sd a1, 8(s0)
+; RV64IXQCCMP-FP-NEXT:    addi a0, s0, 12
+; RV64IXQCCMP-FP-NEXT:    sd a0, -24(s0)
+; RV64IXQCCMP-FP-NEXT:    lw a0, 8(s0)
+; RV64IXQCCMP-FP-NEXT:    sd a5, 40(s0)
+; RV64IXQCCMP-FP-NEXT:    sd a6, 48(s0)
+; RV64IXQCCMP-FP-NEXT:    sd a7, 56(s0)
+; RV64IXQCCMP-FP-NEXT:    sd a2, 16(s0)
+; RV64IXQCCMP-FP-NEXT:    sd a3, 24(s0)
+; RV64IXQCCMP-FP-NEXT:    sd a4, 32(s0)
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 96
+; RV64IXQCCMP-FP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, 96
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-FP-NEXT:    ret
+;
+; RV32IXQCCMP-SR-LABEL: varargs:
+; RV32IXQCCMP-SR:       # %bb.0:
+; RV32IXQCCMP-SR-NEXT:    addi sp, sp, -48
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 48
+; RV32IXQCCMP-SR-NEXT:    mv a0, a1
+; RV32IXQCCMP-SR-NEXT:    sw a5, 36(sp)
+; RV32IXQCCMP-SR-NEXT:    sw a6, 40(sp)
+; RV32IXQCCMP-SR-NEXT:    sw a7, 44(sp)
+; RV32IXQCCMP-SR-NEXT:    sw a1, 20(sp)
+; RV32IXQCCMP-SR-NEXT:    sw a2, 24(sp)
+; RV32IXQCCMP-SR-NEXT:    sw a3, 28(sp)
+; RV32IXQCCMP-SR-NEXT:    sw a4, 32(sp)
+; RV32IXQCCMP-SR-NEXT:    addi a1, sp, 24
+; RV32IXQCCMP-SR-NEXT:    sw a1, 12(sp)
+; RV32IXQCCMP-SR-NEXT:    addi sp, sp, 48
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-SR-NEXT:    ret
+;
+; RV64IXQCCMP-SR-LABEL: varargs:
+; RV64IXQCCMP-SR:       # %bb.0:
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, -80
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 80
+; RV64IXQCCMP-SR-NEXT:    sd a1, 24(sp)
+; RV64IXQCCMP-SR-NEXT:    addi a0, sp, 28
+; RV64IXQCCMP-SR-NEXT:    sd a0, 8(sp)
+; RV64IXQCCMP-SR-NEXT:    lw a0, 24(sp)
+; RV64IXQCCMP-SR-NEXT:    sd a5, 56(sp)
+; RV64IXQCCMP-SR-NEXT:    sd a6, 64(sp)
+; RV64IXQCCMP-SR-NEXT:    sd a7, 72(sp)
+; RV64IXQCCMP-SR-NEXT:    sd a2, 32(sp)
+; RV64IXQCCMP-SR-NEXT:    sd a3, 40(sp)
+; RV64IXQCCMP-SR-NEXT:    sd a4, 48(sp)
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, 80
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-SR-NEXT:    ret
+  %va = alloca ptr
+  call void @llvm.va_start(ptr %va)
+  %argp.cur = load ptr, ptr %va
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %va
+  %1 = load i32, ptr %argp.cur
+  call void @llvm.va_end(ptr %va)
+  ret i32 %1
+}
+
+@var0 = global [18 x i32] zeroinitializer
+
+define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) {
+; RV32IXQCCMP-LABEL: many_args:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s4}, -32
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 32
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-NEXT:    lui a0, %hi(var0)
+; RV32IXQCCMP-NEXT:    lw a6, %lo(var0)(a0)
+; RV32IXQCCMP-NEXT:    lw a7, %lo(var0+4)(a0)
+; RV32IXQCCMP-NEXT:    lw t0, %lo(var0+8)(a0)
+; RV32IXQCCMP-NEXT:    lw t1, %lo(var0+12)(a0)
+; RV32IXQCCMP-NEXT:    addi a5, a0, %lo(var0)
+; RV32IXQCCMP-NEXT:    lw t2, 16(a5)
+; RV32IXQCCMP-NEXT:    lw t3, 20(a5)
+; RV32IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    lw t6, 48(a5)
+; RV32IXQCCMP-NEXT:    lw s2, 52(a5)
+; RV32IXQCCMP-NEXT:    lw a3, 56(a5)
+; RV32IXQCCMP-NEXT:    lw a4, 60(a5)
+; RV32IXQCCMP-NEXT:    lw a1, 64(a5)
+; RV32IXQCCMP-NEXT:    lw s0, 68(a5)
+; RV32IXQCCMP-NEXT:    lw s3, 32(a5)
+; RV32IXQCCMP-NEXT:    lw s4, 36(a5)
+; RV32IXQCCMP-NEXT:    lw s1, 40(a5)
+; RV32IXQCCMP-NEXT:    lw a2, 44(a5)
+; RV32IXQCCMP-NEXT:    sw s0, 68(a5)
+; RV32IXQCCMP-NEXT:    sw a1, 64(a5)
+; RV32IXQCCMP-NEXT:    sw a4, 60(a5)
+; RV32IXQCCMP-NEXT:    sw a3, 56(a5)
+; RV32IXQCCMP-NEXT:    sw s2, 52(a5)
+; RV32IXQCCMP-NEXT:    sw t6, 48(a5)
+; RV32IXQCCMP-NEXT:    sw a2, 44(a5)
+; RV32IXQCCMP-NEXT:    sw s1, 40(a5)
+; RV32IXQCCMP-NEXT:    sw s4, 36(a5)
+; RV32IXQCCMP-NEXT:    sw s3, 32(a5)
+; RV32IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    sw t3, 20(a5)
+; RV32IXQCCMP-NEXT:    sw t2, 16(a5)
+; RV32IXQCCMP-NEXT:    sw t1, %lo(var0+12)(a0)
+; RV32IXQCCMP-NEXT:    sw t0, %lo(var0+8)(a0)
+; RV32IXQCCMP-NEXT:    sw a7, %lo(var0+4)(a0)
+; RV32IXQCCMP-NEXT:    sw a6, %lo(var0)(a0)
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s4}, 32
+;
+; RV64IXQCCMP-LABEL: many_args:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s4}, -48
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 48
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-NEXT:    lui a0, %hi(var0)
+; RV64IXQCCMP-NEXT:    lw a6, %lo(var0)(a0)
+; RV64IXQCCMP-NEXT:    lw a7, %lo(var0+4)(a0)
+; RV64IXQCCMP-NEXT:    lw t0, %lo(var0+8)(a0)
+; RV64IXQCCMP-NEXT:    lw t1, %lo(var0+12)(a0)
+; RV64IXQCCMP-NEXT:    addi a5, a0, %lo(var0)
+; RV64IXQCCMP-NEXT:    lw t2, 16(a5)
+; RV64IXQCCMP-NEXT:    lw t3, 20(a5)
+; RV64IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    lw t6, 48(a5)
+; RV64IXQCCMP-NEXT:    lw s2, 52(a5)
+; RV64IXQCCMP-NEXT:    lw a3, 56(a5)
+; RV64IXQCCMP-NEXT:    lw a4, 60(a5)
+; RV64IXQCCMP-NEXT:    lw a1, 64(a5)
+; RV64IXQCCMP-NEXT:    lw s0, 68(a5)
+; RV64IXQCCMP-NEXT:    lw s3, 32(a5)
+; RV64IXQCCMP-NEXT:    lw s4, 36(a5)
+; RV64IXQCCMP-NEXT:    lw s1, 40(a5)
+; RV64IXQCCMP-NEXT:    lw a2, 44(a5)
+; RV64IXQCCMP-NEXT:    sw s0, 68(a5)
+; RV64IXQCCMP-NEXT:    sw a1, 64(a5)
+; RV64IXQCCMP-NEXT:    sw a4, 60(a5)
+; RV64IXQCCMP-NEXT:    sw a3, 56(a5)
+; RV64IXQCCMP-NEXT:    sw s2, 52(a5)
+; RV64IXQCCMP-NEXT:    sw t6, 48(a5)
+; RV64IXQCCMP-NEXT:    sw a2, 44(a5)
+; RV64IXQCCMP-NEXT:    sw s1, 40(a5)
+; RV64IXQCCMP-NEXT:    sw s4, 36(a5)
+; RV64IXQCCMP-NEXT:    sw s3, 32(a5)
+; RV64IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    sw t3, 20(a5)
+; RV64IXQCCMP-NEXT:    sw t2, 16(a5)
+; RV64IXQCCMP-NEXT:    sw t1, %lo(var0+12)(a0)
+; RV64IXQCCMP-NEXT:    sw t0, %lo(var0+8)(a0)
+; RV64IXQCCMP-NEXT:    sw a7, %lo(var0+4)(a0)
+; RV64IXQCCMP-NEXT:    sw a6, %lo(var0)(a0)
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s4}, 48
+;
+; RV32IXQCCMP-FP-LABEL: many_args:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s5}, -32
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 32
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    lui a0, %hi(var0)
+; RV32IXQCCMP-FP-NEXT:    lw a6, %lo(var0)(a0)
+; RV32IXQCCMP-FP-NEXT:    lw a7, %lo(var0+4)(a0)
+; RV32IXQCCMP-FP-NEXT:    lw t0, %lo(var0+8)(a0)
+; RV32IXQCCMP-FP-NEXT:    lw t1, %lo(var0+12)(a0)
+; RV32IXQCCMP-FP-NEXT:    addi a5, a0, %lo(var0)
+; RV32IXQCCMP-FP-NEXT:    lw t2, 16(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t3, 20(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t6, 48(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s3, 52(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s5, 56(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a4, 60(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a1, 64(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s1, 68(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s2, 32(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s4, 36(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a2, 40(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a3, 44(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s1, 68(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a1, 64(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a4, 60(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s5, 56(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s3, 52(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t6, 48(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a3, 44(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a2, 40(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s4, 36(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s2, 32(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t3, 20(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t2, 16(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t1, %lo(var0+12)(a0)
+; RV32IXQCCMP-FP-NEXT:    sw t0, %lo(var0+8)(a0)
+; RV32IXQCCMP-FP-NEXT:    sw a7, %lo(var0+4)(a0)
+; RV32IXQCCMP-FP-NEXT:    sw a6, %lo(var0)(a0)
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 32
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s5}, 32
+;
+; RV64IXQCCMP-FP-LABEL: many_args:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s5}, -64
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    lui a0, %hi(var0)
+; RV64IXQCCMP-FP-NEXT:    lw a6, %lo(var0)(a0)
+; RV64IXQCCMP-FP-NEXT:    lw a7, %lo(var0+4)(a0)
+; RV64IXQCCMP-FP-NEXT:    lw t0, %lo(var0+8)(a0)
+; RV64IXQCCMP-FP-NEXT:    lw t1, %lo(var0+12)(a0)
+; RV64IXQCCMP-FP-NEXT:    addi a5, a0, %lo(var0)
+; RV64IXQCCMP-FP-NEXT:    lw t2, 16(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t3, 20(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t6, 48(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s3, 52(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s5, 56(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a4, 60(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a1, 64(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s1, 68(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s2, 32(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s4, 36(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a2, 40(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a3, 44(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s1, 68(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a1, 64(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a4, 60(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s5, 56(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s3, 52(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t6, 48(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a3, 44(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a2, 40(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s4, 36(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s2, 32(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t3, 20(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t2, 16(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t1, %lo(var0+12)(a0)
+; RV64IXQCCMP-FP-NEXT:    sw t0, %lo(var0+8)(a0)
+; RV64IXQCCMP-FP-NEXT:    sw a7, %lo(var0+4)(a0)
+; RV64IXQCCMP-FP-NEXT:    sw a6, %lo(var0)(a0)
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 64
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s5}, 64
+;
+; RV32IXQCCMP-SR-LABEL: many_args:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s4}, -32
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 32
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-SR-NEXT:    lui a0, %hi(var0)
+; RV32IXQCCMP-SR-NEXT:    lw a6, %lo(var0)(a0)
+; RV32IXQCCMP-SR-NEXT:    lw a7, %lo(var0+4)(a0)
+; RV32IXQCCMP-SR-NEXT:    lw t0, %lo(var0+8)(a0)
+; RV32IXQCCMP-SR-NEXT:    lw t1, %lo(var0+12)(a0)
+; RV32IXQCCMP-SR-NEXT:    addi a5, a0, %lo(var0)
+; RV32IXQCCMP-SR-NEXT:    lw t2, 16(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t3, 20(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t6, 48(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s2, 52(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a3, 56(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a4, 60(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a1, 64(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s0, 68(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s3, 32(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s4, 36(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s1, 40(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a2, 44(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s0, 68(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a1, 64(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a4, 60(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a3, 56(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s2, 52(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t6, 48(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a2, 44(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s1, 40(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s4, 36(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s3, 32(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t3, 20(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t2, 16(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t1, %lo(var0+12)(a0)
+; RV32IXQCCMP-SR-NEXT:    sw t0, %lo(var0+8)(a0)
+; RV32IXQCCMP-SR-NEXT:    sw a7, %lo(var0+4)(a0)
+; RV32IXQCCMP-SR-NEXT:    sw a6, %lo(var0)(a0)
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s4}, 32
+;
+; RV64IXQCCMP-SR-LABEL: many_args:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s4}, -48
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 48
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-SR-NEXT:    lui a0, %hi(var0)
+; RV64IXQCCMP-SR-NEXT:    lw a6, %lo(var0)(a0)
+; RV64IXQCCMP-SR-NEXT:    lw a7, %lo(var0+4)(a0)
+; RV64IXQCCMP-SR-NEXT:    lw t0, %lo(var0+8)(a0)
+; RV64IXQCCMP-SR-NEXT:    lw t1, %lo(var0+12)(a0)
+; RV64IXQCCMP-SR-NEXT:    addi a5, a0, %lo(var0)
+; RV64IXQCCMP-SR-NEXT:    lw t2, 16(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t3, 20(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t6, 48(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s2, 52(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a3, 56(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a4, 60(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a1, 64(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s0, 68(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s3, 32(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s4, 36(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s1, 40(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a2, 44(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s0, 68(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a1, 64(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a4, 60(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a3, 56(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s2, 52(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t6, 48(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a2, 44(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s1, 40(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s4, 36(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s3, 32(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t3, 20(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t2, 16(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t1, %lo(var0+12)(a0)
+; RV64IXQCCMP-SR-NEXT:    sw t0, %lo(var0+8)(a0)
+; RV64IXQCCMP-SR-NEXT:    sw a7, %lo(var0+4)(a0)
+; RV64IXQCCMP-SR-NEXT:    sw a6, %lo(var0)(a0)
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s4}, 48
+entry:
+  %val = load [18 x i32], ptr @var0
+  store volatile [18 x i32] %val, ptr @var0
+  ret void
+}
+
+; Check that dynamic allocation calculations remain correct
+
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
+declare void @notdead(ptr)
+
+define void @alloca(i32 %n) {
+; RV32IXQCCMP-LABEL: alloca:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    mv s1, sp
+; RV32IXQCCMP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-NEXT:    mv sp, a0
+; RV32IXQCCMP-NEXT:    call notdead
+; RV32IXQCCMP-NEXT:    mv sp, s1
+; RV32IXQCCMP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+;
+; RV64IXQCCMP-LABEL: alloca:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -32
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 32
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    mv s1, sp
+; RV64IXQCCMP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-NEXT:    mv sp, a0
+; RV64IXQCCMP-NEXT:    call notdead
+; RV64IXQCCMP-NEXT:    mv sp, s1
+; RV64IXQCCMP-NEXT:    addi sp, s0, -32
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 32
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s1}, 32
+;
+; RV32IXQCCMP-FP-LABEL: alloca:
+; RV32IXQCCMP-FP:       # %bb.0:
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    mv s1, sp
+; RV32IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-FP-NEXT:    mv sp, a0
+; RV32IXQCCMP-FP-NEXT:    call notdead
+; RV32IXQCCMP-FP-NEXT:    mv sp, s1
+; RV32IXQCCMP-FP-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+;
+; RV64IXQCCMP-FP-LABEL: alloca:
+; RV64IXQCCMP-FP:       # %bb.0:
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 32
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    mv s1, sp
+; RV64IXQCCMP-FP-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-FP-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-FP-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-FP-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-FP-NEXT:    mv sp, a0
+; RV64IXQCCMP-FP-NEXT:    call notdead
+; RV64IXQCCMP-FP-NEXT:    mv sp, s1
+; RV64IXQCCMP-FP-NEXT:    addi sp, s0, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 32
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s1}, 32
+;
+; RV32IXQCCMP-SR-LABEL: alloca:
+; RV32IXQCCMP-SR:       # %bb.0:
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0-s1}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    mv s1, sp
+; RV32IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV32IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV32IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV32IXQCCMP-SR-NEXT:    mv sp, a0
+; RV32IXQCCMP-SR-NEXT:    call notdead
+; RV32IXQCCMP-SR-NEXT:    mv sp, s1
+; RV32IXQCCMP-SR-NEXT:    addi sp, s0, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+;
+; RV64IXQCCMP-SR-LABEL: alloca:
+; RV64IXQCCMP-SR:       # %bb.0:
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0-s1}, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 32
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    mv s1, sp
+; RV64IXQCCMP-SR-NEXT:    slli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    srli a0, a0, 32
+; RV64IXQCCMP-SR-NEXT:    addi a0, a0, 15
+; RV64IXQCCMP-SR-NEXT:    andi a0, a0, -16
+; RV64IXQCCMP-SR-NEXT:    sub a0, sp, a0
+; RV64IXQCCMP-SR-NEXT:    mv sp, a0
+; RV64IXQCCMP-SR-NEXT:    call notdead
+; RV64IXQCCMP-SR-NEXT:    mv sp, s1
+; RV64IXQCCMP-SR-NEXT:    addi sp, s0, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 32
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s1}, 32
+  %sp = call ptr @llvm.stacksave()
+  %addr = alloca i8, i32 %n
+  call void @notdead(ptr %addr)
+  call void @llvm.stackrestore(ptr %sp)
+  ret void
+}
+
+declare i32 @foo_test_irq(...)
+@var_test_irq = global [32 x i32] zeroinitializer
+
+define void @foo_with_irq() "interrupt"="machine" {
+; RV32IXQCCMP-LABEL: foo_with_irq:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra}, -64
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    sw t0, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t1, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t2, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a1, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a2, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a3, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a4, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a6, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a7, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t3, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    .cfi_offset t0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset t1, -12
+; RV32IXQCCMP-NEXT:    .cfi_offset t2, -16
+; RV32IXQCCMP-NEXT:    .cfi_offset a0, -20
+; RV32IXQCCMP-NEXT:    .cfi_offset a1, -24
+; RV32IXQCCMP-NEXT:    .cfi_offset a2, -28
+; RV32IXQCCMP-NEXT:    .cfi_offset a3, -32
+; RV32IXQCCMP-NEXT:    .cfi_offset a4, -36
+; RV32IXQCCMP-NEXT:    .cfi_offset a5, -40
+; RV32IXQCCMP-NEXT:    .cfi_offset a6, -44
+; RV32IXQCCMP-NEXT:    .cfi_offset a7, -48
+; RV32IXQCCMP-NEXT:    .cfi_offset t3, -52
+; RV32IXQCCMP-NEXT:    .cfi_offset t4, -56
+; RV32IXQCCMP-NEXT:    .cfi_offset t5, -60
+; RV32IXQCCMP-NEXT:    .cfi_offset t6, -64
+; RV32IXQCCMP-NEXT:    call foo_test_irq
+; RV32IXQCCMP-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t2, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a1, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a6, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a7, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t3, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t4, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t5, 4(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t6, 0(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    .cfi_restore t0
+; RV32IXQCCMP-NEXT:    .cfi_restore t1
+; RV32IXQCCMP-NEXT:    .cfi_restore t2
+; RV32IXQCCMP-NEXT:    .cfi_restore a0
+; RV32IXQCCMP-NEXT:    .cfi_restore a1
+; RV32IXQCCMP-NEXT:    .cfi_restore a2
+; RV32IXQCCMP-NEXT:    .cfi_restore a3
+; RV32IXQCCMP-NEXT:    .cfi_restore a4
+; RV32IXQCCMP-NEXT:    .cfi_restore a5
+; RV32IXQCCMP-NEXT:    .cfi_restore a6
+; RV32IXQCCMP-NEXT:    .cfi_restore a7
+; RV32IXQCCMP-NEXT:    .cfi_restore t3
+; RV32IXQCCMP-NEXT:    .cfi_restore t4
+; RV32IXQCCMP-NEXT:    .cfi_restore t5
+; RV32IXQCCMP-NEXT:    .cfi_restore t6
+; RV32IXQCCMP-NEXT:    qc.cm.pop {ra}, 64
+; RV32IXQCCMP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-NEXT:    mret
+;
+; RV64IXQCCMP-LABEL: foo_with_irq:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra}, -64
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    addi sp, sp, -64
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 128
+; RV64IXQCCMP-NEXT:    sd t0, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t1, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t2, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a0, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a1, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a2, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a3, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a4, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a5, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a6, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a7, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t3, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t4, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t5, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t6, 0(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    .cfi_offset t0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset t1, -24
+; RV64IXQCCMP-NEXT:    .cfi_offset t2, -32
+; RV64IXQCCMP-NEXT:    .cfi_offset a0, -40
+; RV64IXQCCMP-NEXT:    .cfi_offset a1, -48
+; RV64IXQCCMP-NEXT:    .cfi_offset a2, -56
+; RV64IXQCCMP-NEXT:    .cfi_offset a3, -64
+; RV64IXQCCMP-NEXT:    .cfi_offset a4, -72
+; RV64IXQCCMP-NEXT:    .cfi_offset a5, -80
+; RV64IXQCCMP-NEXT:    .cfi_offset a6, -88
+; RV64IXQCCMP-NEXT:    .cfi_offset a7, -96
+; RV64IXQCCMP-NEXT:    .cfi_offset t3, -104
+; RV64IXQCCMP-NEXT:    .cfi_offset t4, -112
+; RV64IXQCCMP-NEXT:    .cfi_offset t5, -120
+; RV64IXQCCMP-NEXT:    .cfi_offset t6, -128
+; RV64IXQCCMP-NEXT:    call foo_test_irq
+; RV64IXQCCMP-NEXT:    ld t0, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t1, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t2, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a0, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a1, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a2, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a3, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a4, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a5, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a6, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a7, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t3, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t4, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t5, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t6, 0(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    .cfi_restore t0
+; RV64IXQCCMP-NEXT:    .cfi_restore t1
+; RV64IXQCCMP-NEXT:    .cfi_restore t2
+; RV64IXQCCMP-NEXT:    .cfi_restore a0
+; RV64IXQCCMP-NEXT:    .cfi_restore a1
+; RV64IXQCCMP-NEXT:    .cfi_restore a2
+; RV64IXQCCMP-NEXT:    .cfi_restore a3
+; RV64IXQCCMP-NEXT:    .cfi_restore a4
+; RV64IXQCCMP-NEXT:    .cfi_restore a5
+; RV64IXQCCMP-NEXT:    .cfi_restore a6
+; RV64IXQCCMP-NEXT:    .cfi_restore a7
+; RV64IXQCCMP-NEXT:    .cfi_restore t3
+; RV64IXQCCMP-NEXT:    .cfi_restore t4
+; RV64IXQCCMP-NEXT:    .cfi_restore t5
+; RV64IXQCCMP-NEXT:    .cfi_restore t6
+; RV64IXQCCMP-NEXT:    addi sp, sp, 64
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-NEXT:    qc.cm.pop {ra}, 64
+; RV64IXQCCMP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-NEXT:    mret
+;
+; RV32IXQCCMP-FP-LABEL: foo_with_irq:
+; RV32IXQCCMP-FP:       # %bb.0:
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -64
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 80
+; RV32IXQCCMP-FP-NEXT:    sw t0, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t1, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t2, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a0, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a1, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a2, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a3, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a4, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a5, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a6, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a7, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t3, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t4, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t6, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t0, -12
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t1, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t2, -20
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a0, -24
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a1, -28
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a2, -32
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a3, -36
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a4, -40
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a5, -44
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a6, -48
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a7, -52
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t3, -56
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t4, -60
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t5, -64
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t6, -68
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    call foo_test_irq
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 80
+; RV32IXQCCMP-FP-NEXT:    lw t0, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t1, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t2, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a0, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a1, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a2, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a3, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a4, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a5, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a6, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a7, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t3, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t4, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t5, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t6, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t0
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t1
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t2
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a0
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a1
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a2
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a3
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a4
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a5
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a6
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a7
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t3
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t4
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t5
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t6
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0}, 64
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-FP-NEXT:    mret
+;
+; RV64IXQCCMP-FP-LABEL: foo_with_irq:
+; RV64IXQCCMP-FP:       # %bb.0:
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -64
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, -80
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 144
+; RV64IXQCCMP-FP-NEXT:    sd t0, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t1, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t2, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a1, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a2, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a3, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a4, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a5, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a6, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a7, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t3, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t0, -24
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t1, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t2, -40
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a0, -48
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a1, -56
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a2, -64
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a3, -72
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a4, -80
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a5, -88
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a6, -96
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a7, -104
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t3, -112
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t4, -120
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t5, -128
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t6, -136
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    call foo_test_irq
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 144
+; RV64IXQCCMP-FP-NEXT:    ld t0, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t1, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t2, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a1, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a2, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a3, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a4, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a5, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a6, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a7, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t3, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t4, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t0
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t1
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t2
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a0
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a1
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a2
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a3
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a4
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a5
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a6
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a7
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t3
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t4
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t5
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t6
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, 80
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0}, 64
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-FP-NEXT:    mret
+;
+; RV32IXQCCMP-SR-LABEL: foo_with_irq:
+; RV32IXQCCMP-SR:       # %bb.0:
+; RV32IXQCCMP-SR-NEXT:    qc.cm.push {ra}, -64
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    sw t0, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t1, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t2, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a1, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a2, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a3, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a4, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a6, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a7, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t3, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t1, -12
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t2, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a0, -20
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a1, -24
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a2, -28
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a3, -32
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a4, -36
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a5, -40
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a6, -44
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a7, -48
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t3, -52
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t4, -56
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t5, -60
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t6, -64
+; RV32IXQCCMP-SR-NEXT:    call foo_test_irq
+; RV32IXQCCMP-SR-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t2, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a1, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a6, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a7, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t3, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t4, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t5, 4(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t6, 0(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t0
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t1
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t2
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a0
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a1
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a2
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a3
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a4
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a5
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a6
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a7
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t3
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t4
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t5
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t6
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pop {ra}, 64
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-SR-NEXT:    mret
+;
+; RV64IXQCCMP-SR-LABEL: foo_with_irq:
+; RV64IXQCCMP-SR:       # %bb.0:
+; RV64IXQCCMP-SR-NEXT:    qc.cm.push {ra}, -64
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, -64
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 128
+; RV64IXQCCMP-SR-NEXT:    sd t0, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t1, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t2, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a0, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a1, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a2, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a3, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a4, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a5, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a6, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a7, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t3, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t4, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t5, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t6, 0(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t1, -24
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t2, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a0, -40
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a1, -48
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a2, -56
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a3, -64
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a4, -72
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a5, -80
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a6, -88
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a7, -96
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t3, -104
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t4, -112
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t5, -120
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t6, -128
+; RV64IXQCCMP-SR-NEXT:    call foo_test_irq
+; RV64IXQCCMP-SR-NEXT:    ld t0, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t1, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t2, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a0, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a1, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a2, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a3, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a4, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a5, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a6, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a7, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t3, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t4, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t5, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t6, 0(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t0
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t1
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t2
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a0
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a1
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a2
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a3
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a4
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a5
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a6
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a7
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t3
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t4
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t5
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t6
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, 64
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pop {ra}, 64
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-SR-NEXT:    mret
+  %call = call i32 @foo_test_irq()
+  ret void
+}
+
+; foo_no_irq has no "interrupt" attribute, so (unlike foo_with_irq in this
+; file) no caller-saved registers (t0-t6, a0-a7) need to be spilled: every
+; configuration is expected to emit a minimal frame consisting of a single
+; qc.cm.push {ra} / qc.cm.popret {ra} pair (plus s0 and the frame-pointer
+; CFI updates in the -FP configurations).
+; NOTE(review): these check lines are auto-generated; regenerate with
+; update_llc_test_checks.py rather than editing by hand.
+define void @foo_no_irq() {
+; RV32IXQCCMP-LABEL: foo_no_irq:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    call foo_test_irq
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra}, 16
+;
+; RV64IXQCCMP-LABEL: foo_no_irq:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra}, -16
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    call foo_test_irq
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra}, 16
+;
+; RV32IXQCCMP-FP-LABEL: foo_no_irq:
+; RV32IXQCCMP-FP:       # %bb.0:
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    call foo_test_irq
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV64IXQCCMP-FP-LABEL: foo_no_irq:
+; RV64IXQCCMP-FP:       # %bb.0:
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0}, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    call foo_test_irq
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0}, 16
+;
+; RV32IXQCCMP-SR-LABEL: foo_no_irq:
+; RV32IXQCCMP-SR:       # %bb.0:
+; RV32IXQCCMP-SR-NEXT:    qc.cm.push {ra}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    call foo_test_irq
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra}, 16
+;
+; RV64IXQCCMP-SR-LABEL: foo_no_irq:
+; RV64IXQCCMP-SR:       # %bb.0:
+; RV64IXQCCMP-SR-NEXT:    qc.cm.push {ra}, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    call foo_test_irq
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra}, 16
+  %call = call i32 @foo_test_irq()
+  ret void
+}
+
+define void @callee_with_irq() "interrupt"="machine" {
+; RV32IXQCCMP-LABEL: callee_with_irq:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -112
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-NEXT:    addi sp, sp, -32
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 144
+; RV32IXQCCMP-NEXT:    sw t0, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t1, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t2, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a0, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a1, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a2, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a3, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a4, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a5, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a6, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw a7, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t3, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t4, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t5, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    sw t6, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    .cfi_offset t0, -56
+; RV32IXQCCMP-NEXT:    .cfi_offset t1, -60
+; RV32IXQCCMP-NEXT:    .cfi_offset t2, -64
+; RV32IXQCCMP-NEXT:    .cfi_offset a0, -68
+; RV32IXQCCMP-NEXT:    .cfi_offset a1, -72
+; RV32IXQCCMP-NEXT:    .cfi_offset a2, -76
+; RV32IXQCCMP-NEXT:    .cfi_offset a3, -80
+; RV32IXQCCMP-NEXT:    .cfi_offset a4, -84
+; RV32IXQCCMP-NEXT:    .cfi_offset a5, -88
+; RV32IXQCCMP-NEXT:    .cfi_offset a6, -92
+; RV32IXQCCMP-NEXT:    .cfi_offset a7, -96
+; RV32IXQCCMP-NEXT:    .cfi_offset t3, -100
+; RV32IXQCCMP-NEXT:    .cfi_offset t4, -104
+; RV32IXQCCMP-NEXT:    .cfi_offset t5, -108
+; RV32IXQCCMP-NEXT:    .cfi_offset t6, -112
+; RV32IXQCCMP-NEXT:    lui t0, %hi(var_test_irq)
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV32IXQCCMP-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-NEXT:    lw s1, 80(a5)
+; RV32IXQCCMP-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-NEXT:    lw t1, 92(a5)
+; RV32IXQCCMP-NEXT:    lw a7, 112(a5)
+; RV32IXQCCMP-NEXT:    lw s0, 116(a5)
+; RV32IXQCCMP-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-NEXT:    lw a6, 96(a5)
+; RV32IXQCCMP-NEXT:    lw a4, 100(a5)
+; RV32IXQCCMP-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-NEXT:    sw s0, 116(a5)
+; RV32IXQCCMP-NEXT:    sw a7, 112(a5)
+; RV32IXQCCMP-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-NEXT:    sw a4, 100(a5)
+; RV32IXQCCMP-NEXT:    sw a6, 96(a5)
+; RV32IXQCCMP-NEXT:    sw t1, 92(a5)
+; RV32IXQCCMP-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-NEXT:    sw s1, 80(a5)
+; RV32IXQCCMP-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-NEXT:    lw t0, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t1, 84(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t2, 80(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a0, 76(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a1, 72(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a2, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a3, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a4, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a5, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a6, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw a7, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t3, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t4, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t5, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    .cfi_restore t0
+; RV32IXQCCMP-NEXT:    .cfi_restore t1
+; RV32IXQCCMP-NEXT:    .cfi_restore t2
+; RV32IXQCCMP-NEXT:    .cfi_restore a0
+; RV32IXQCCMP-NEXT:    .cfi_restore a1
+; RV32IXQCCMP-NEXT:    .cfi_restore a2
+; RV32IXQCCMP-NEXT:    .cfi_restore a3
+; RV32IXQCCMP-NEXT:    .cfi_restore a4
+; RV32IXQCCMP-NEXT:    .cfi_restore a5
+; RV32IXQCCMP-NEXT:    .cfi_restore a6
+; RV32IXQCCMP-NEXT:    .cfi_restore a7
+; RV32IXQCCMP-NEXT:    .cfi_restore t3
+; RV32IXQCCMP-NEXT:    .cfi_restore t4
+; RV32IXQCCMP-NEXT:    .cfi_restore t5
+; RV32IXQCCMP-NEXT:    .cfi_restore t6
+; RV32IXQCCMP-NEXT:    addi sp, sp, 32
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-NEXT:    qc.cm.pop {ra, s0-s11}, 112
+; RV32IXQCCMP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-NEXT:    .cfi_restore s1
+; RV32IXQCCMP-NEXT:    .cfi_restore s2
+; RV32IXQCCMP-NEXT:    .cfi_restore s3
+; RV32IXQCCMP-NEXT:    .cfi_restore s4
+; RV32IXQCCMP-NEXT:    .cfi_restore s5
+; RV32IXQCCMP-NEXT:    .cfi_restore s6
+; RV32IXQCCMP-NEXT:    .cfi_restore s7
+; RV32IXQCCMP-NEXT:    .cfi_restore s8
+; RV32IXQCCMP-NEXT:    .cfi_restore s9
+; RV32IXQCCMP-NEXT:    .cfi_restore s10
+; RV32IXQCCMP-NEXT:    .cfi_restore s11
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-NEXT:    mret
+;
+; RV64IXQCCMP-LABEL: callee_with_irq:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -160
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-NEXT:    addi sp, sp, -112
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 272
+; RV64IXQCCMP-NEXT:    sd t0, 160(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t1, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t2, 144(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a0, 136(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a1, 128(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a2, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a3, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a4, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a5, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a6, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd a7, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t3, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t4, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t5, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    sd t6, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    .cfi_offset t0, -112
+; RV64IXQCCMP-NEXT:    .cfi_offset t1, -120
+; RV64IXQCCMP-NEXT:    .cfi_offset t2, -128
+; RV64IXQCCMP-NEXT:    .cfi_offset a0, -136
+; RV64IXQCCMP-NEXT:    .cfi_offset a1, -144
+; RV64IXQCCMP-NEXT:    .cfi_offset a2, -152
+; RV64IXQCCMP-NEXT:    .cfi_offset a3, -160
+; RV64IXQCCMP-NEXT:    .cfi_offset a4, -168
+; RV64IXQCCMP-NEXT:    .cfi_offset a5, -176
+; RV64IXQCCMP-NEXT:    .cfi_offset a6, -184
+; RV64IXQCCMP-NEXT:    .cfi_offset a7, -192
+; RV64IXQCCMP-NEXT:    .cfi_offset t3, -200
+; RV64IXQCCMP-NEXT:    .cfi_offset t4, -208
+; RV64IXQCCMP-NEXT:    .cfi_offset t5, -216
+; RV64IXQCCMP-NEXT:    .cfi_offset t6, -224
+; RV64IXQCCMP-NEXT:    lui t0, %hi(var_test_irq)
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV64IXQCCMP-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-NEXT:    sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-NEXT:    lw s1, 80(a5)
+; RV64IXQCCMP-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-NEXT:    lw t1, 92(a5)
+; RV64IXQCCMP-NEXT:    lw a7, 112(a5)
+; RV64IXQCCMP-NEXT:    lw s0, 116(a5)
+; RV64IXQCCMP-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-NEXT:    lw a6, 96(a5)
+; RV64IXQCCMP-NEXT:    lw a4, 100(a5)
+; RV64IXQCCMP-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-NEXT:    sw s0, 116(a5)
+; RV64IXQCCMP-NEXT:    sw a7, 112(a5)
+; RV64IXQCCMP-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-NEXT:    sw a4, 100(a5)
+; RV64IXQCCMP-NEXT:    sw a6, 96(a5)
+; RV64IXQCCMP-NEXT:    sw t1, 92(a5)
+; RV64IXQCCMP-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-NEXT:    sw s1, 80(a5)
+; RV64IXQCCMP-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 0(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-NEXT:    ld t0, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t1, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t2, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a0, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a1, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a2, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a3, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a4, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a5, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a6, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld a7, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t3, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t4, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t5, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    ld t6, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    .cfi_restore t0
+; RV64IXQCCMP-NEXT:    .cfi_restore t1
+; RV64IXQCCMP-NEXT:    .cfi_restore t2
+; RV64IXQCCMP-NEXT:    .cfi_restore a0
+; RV64IXQCCMP-NEXT:    .cfi_restore a1
+; RV64IXQCCMP-NEXT:    .cfi_restore a2
+; RV64IXQCCMP-NEXT:    .cfi_restore a3
+; RV64IXQCCMP-NEXT:    .cfi_restore a4
+; RV64IXQCCMP-NEXT:    .cfi_restore a5
+; RV64IXQCCMP-NEXT:    .cfi_restore a6
+; RV64IXQCCMP-NEXT:    .cfi_restore a7
+; RV64IXQCCMP-NEXT:    .cfi_restore t3
+; RV64IXQCCMP-NEXT:    .cfi_restore t4
+; RV64IXQCCMP-NEXT:    .cfi_restore t5
+; RV64IXQCCMP-NEXT:    .cfi_restore t6
+; RV64IXQCCMP-NEXT:    addi sp, sp, 112
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-NEXT:    qc.cm.pop {ra, s0-s11}, 160
+; RV64IXQCCMP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-NEXT:    .cfi_restore s1
+; RV64IXQCCMP-NEXT:    .cfi_restore s2
+; RV64IXQCCMP-NEXT:    .cfi_restore s3
+; RV64IXQCCMP-NEXT:    .cfi_restore s4
+; RV64IXQCCMP-NEXT:    .cfi_restore s5
+; RV64IXQCCMP-NEXT:    .cfi_restore s6
+; RV64IXQCCMP-NEXT:    .cfi_restore s7
+; RV64IXQCCMP-NEXT:    .cfi_restore s8
+; RV64IXQCCMP-NEXT:    .cfi_restore s9
+; RV64IXQCCMP-NEXT:    .cfi_restore s10
+; RV64IXQCCMP-NEXT:    .cfi_restore s11
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-NEXT:    mret
+;
+; RV32IXQCCMP-FP-LABEL: callee_with_irq:
+; RV32IXQCCMP-FP:       # %bb.0:
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -112
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, -32
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 144
+; RV32IXQCCMP-FP-NEXT:    sw t0, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t1, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t2, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a0, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a1, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a2, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a3, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a4, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a5, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a6, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw a7, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t3, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t4, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t5, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    sw t6, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t0, -56
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t1, -60
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t2, -64
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a0, -68
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a1, -72
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a2, -76
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a3, -80
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a4, -84
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a5, -88
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a6, -92
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset a7, -96
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t3, -100
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t4, -104
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t5, -108
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset t6, -112
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    lui t1, %hi(var_test_irq)
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -116(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+4)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -120(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+8)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -124(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+12)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -128(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    addi a5, t1, %lo(var_test_irq)
+; RV32IXQCCMP-FP-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -132(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -136(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, 24(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -140(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-FP-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t4, 80(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s1, 92(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t0, 112(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a4, 116(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a7, 96(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a6, 100(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a4, 116(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t0, 112(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a6, 100(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a7, 96(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s1, 92(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t4, 80(a5)
+; RV32IXQCCMP-FP-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -140(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, 24(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -136(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -132(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -128(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+12)(t1)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -124(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+8)(t1)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -120(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+4)(t1)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -116(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq)(t1)
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 144
+; RV32IXQCCMP-FP-NEXT:    lw t0, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t1, 84(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t2, 80(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a0, 76(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a1, 72(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a2, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a3, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a4, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a5, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a6, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw a7, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t3, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t4, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t5, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t0
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t1
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t2
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a0
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a1
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a2
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a3
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a4
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a5
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a6
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore a7
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t3
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t4
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t5
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore t6
+; RV32IXQCCMP-FP-NEXT:    addi sp, sp, 32
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0-s11}, 112
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s1
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s2
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s3
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s4
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s5
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s6
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s7
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s8
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s9
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s10
+; RV32IXQCCMP-FP-NEXT:    .cfi_restore s11
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-FP-NEXT:    mret
+;
+; RV64IXQCCMP-FP-LABEL: callee_with_irq:
+; RV64IXQCCMP-FP:       # %bb.0:
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -160
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, -128
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 288
+; RV64IXQCCMP-FP-NEXT:    sd t0, 176(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t1, 168(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t2, 160(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a0, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a1, 144(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a2, 136(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a3, 128(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a4, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a5, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a6, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd a7, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t3, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t4, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t5, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    sd t6, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t0, -112
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t1, -120
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t2, -128
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a0, -136
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a1, -144
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a2, -152
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a3, -160
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a4, -168
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a5, -176
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a6, -184
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset a7, -192
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t3, -200
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t4, -208
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t5, -216
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset t6, -224
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    lui t1, %hi(var_test_irq)
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -232(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+4)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -240(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+8)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -248(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -256(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    addi a5, t1, %lo(var_test_irq)
+; RV64IXQCCMP-FP-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -264(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -272(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -280(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-FP-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t4, 80(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s1, 92(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t0, 112(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a4, 116(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a7, 96(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a6, 100(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a4, 116(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t0, 112(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a6, 100(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a7, 96(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s1, 92(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t4, 80(a5)
+; RV64IXQCCMP-FP-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -280(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -272(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -264(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -256(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -248(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+8)(t1)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -240(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+4)(t1)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -232(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq)(t1)
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 288
+; RV64IXQCCMP-FP-NEXT:    ld t0, 176(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t1, 168(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t2, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a0, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a1, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a2, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a3, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a4, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a5, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a6, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld a7, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t3, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t4, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t5, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    ld t6, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t0
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t1
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t2
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a0
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a1
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a2
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a3
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a4
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a5
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a6
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore a7
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t3
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t4
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t5
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore t6
+; RV64IXQCCMP-FP-NEXT:    addi sp, sp, 128
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pop {ra, s0-s11}, 160
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s1
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s2
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s3
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s4
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s5
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s6
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s7
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s8
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s9
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s10
+; RV64IXQCCMP-FP-NEXT:    .cfi_restore s11
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-FP-NEXT:    mret
+;
+; RV32IXQCCMP-SR-LABEL: callee_with_irq:
+; RV32IXQCCMP-SR:       # %bb.0:
+; RV32IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s11}, -112
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-SR-NEXT:    addi sp, sp, -32
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 144
+; RV32IXQCCMP-SR-NEXT:    sw t0, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t1, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t2, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a0, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a1, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a2, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a3, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a4, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a5, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a6, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw a7, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t3, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t4, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t5, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    sw t6, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t0, -56
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t1, -60
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t2, -64
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a0, -68
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a1, -72
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a2, -76
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a3, -80
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a4, -84
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a5, -88
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a6, -92
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset a7, -96
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t3, -100
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t4, -104
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t5, -108
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset t6, -112
+; RV32IXQCCMP-SR-NEXT:    lui t0, %hi(var_test_irq)
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-SR-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s1, 80(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t1, 92(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a7, 112(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s0, 116(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a6, 96(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a4, 100(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s0, 116(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a7, 112(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a4, 100(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a6, 96(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t1, 92(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s1, 80(a5)
+; RV32IXQCCMP-SR-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-SR-NEXT:    lw t0, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t1, 84(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t2, 80(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a0, 76(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a1, 72(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a2, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a3, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a4, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a5, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a6, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw a7, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t3, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t4, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t5, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t0
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t1
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t2
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a0
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a1
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a2
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a3
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a4
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a5
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a6
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore a7
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t3
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t4
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t5
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore t6
+; RV32IXQCCMP-SR-NEXT:    addi sp, sp, 32
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 112
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pop {ra, s0-s11}, 112
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s0
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s1
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s2
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s3
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s4
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s5
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s6
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s7
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s8
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s9
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s10
+; RV32IXQCCMP-SR-NEXT:    .cfi_restore s11
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV32IXQCCMP-SR-NEXT:    mret
+;
+; RV64IXQCCMP-SR-LABEL: callee_with_irq:
+; RV64IXQCCMP-SR:       # %bb.0:
+; RV64IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s11}, -160
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, -112
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 272
+; RV64IXQCCMP-SR-NEXT:    sd t0, 160(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t1, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t2, 144(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a0, 136(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a1, 128(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a2, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a3, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a4, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a5, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a6, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd a7, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t3, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t4, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t5, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    sd t6, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t0, -112
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t1, -120
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t2, -128
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a0, -136
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a1, -144
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a2, -152
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a3, -160
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a4, -168
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a5, -176
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a6, -184
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset a7, -192
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t3, -200
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t4, -208
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t5, -216
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset t6, -224
+; RV64IXQCCMP-SR-NEXT:    lui t0, %hi(var_test_irq)
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV64IXQCCMP-SR-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-SR-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s1, 80(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t1, 92(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a7, 112(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s0, 116(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a6, 96(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a4, 100(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s0, 116(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a7, 112(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a4, 100(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a6, 96(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t1, 92(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s1, 80(a5)
+; RV64IXQCCMP-SR-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 0(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-SR-NEXT:    ld t0, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t1, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t2, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a0, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a1, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a2, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a3, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a4, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a5, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a6, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld a7, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t3, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t4, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t5, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    ld t6, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t0
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t1
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t2
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a0
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a1
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a2
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a3
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a4
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a5
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a6
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore a7
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t3
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t4
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t5
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore t6
+; RV64IXQCCMP-SR-NEXT:    addi sp, sp, 112
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pop {ra, s0-s11}, 160
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore ra
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s0
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s1
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s2
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s3
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s4
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s5
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s6
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s7
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s8
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s9
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s10
+; RV64IXQCCMP-SR-NEXT:    .cfi_restore s11
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 0
+; RV64IXQCCMP-SR-NEXT:    mret
+  %val = load [32 x i32], ptr @var_test_irq
+  store volatile [32 x i32] %val, ptr @var_test_irq
+  ret void
+}
+
+define void @callee_no_irq() {
+; RV32IXQCCMP-LABEL: callee_no_irq:
+; RV32IXQCCMP:       # %bb.0:
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -80
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 80
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-NEXT:    lui t0, %hi(var_test_irq)
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV32IXQCCMP-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-NEXT:    lw s1, 80(a5)
+; RV32IXQCCMP-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-NEXT:    lw t1, 92(a5)
+; RV32IXQCCMP-NEXT:    lw a7, 112(a5)
+; RV32IXQCCMP-NEXT:    lw s0, 116(a5)
+; RV32IXQCCMP-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-NEXT:    lw a6, 96(a5)
+; RV32IXQCCMP-NEXT:    lw a4, 100(a5)
+; RV32IXQCCMP-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-NEXT:    sw s0, 116(a5)
+; RV32IXQCCMP-NEXT:    sw a7, 112(a5)
+; RV32IXQCCMP-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-NEXT:    sw a4, 100(a5)
+; RV32IXQCCMP-NEXT:    sw a6, 96(a5)
+; RV32IXQCCMP-NEXT:    sw t1, 92(a5)
+; RV32IXQCCMP-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-NEXT:    sw s1, 80(a5)
+; RV32IXQCCMP-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 80
+;
+; RV64IXQCCMP-LABEL: callee_no_irq:
+; RV64IXQCCMP:       # %bb.0:
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -160
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-NEXT:    lui t0, %hi(var_test_irq)
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV64IXQCCMP-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-NEXT:    lw s1, 80(a5)
+; RV64IXQCCMP-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-NEXT:    lw t1, 92(a5)
+; RV64IXQCCMP-NEXT:    lw a7, 112(a5)
+; RV64IXQCCMP-NEXT:    lw s0, 116(a5)
+; RV64IXQCCMP-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-NEXT:    lw a6, 96(a5)
+; RV64IXQCCMP-NEXT:    lw a4, 100(a5)
+; RV64IXQCCMP-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-NEXT:    sw s0, 116(a5)
+; RV64IXQCCMP-NEXT:    sw a7, 112(a5)
+; RV64IXQCCMP-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-NEXT:    sw a4, 100(a5)
+; RV64IXQCCMP-NEXT:    sw a6, 96(a5)
+; RV64IXQCCMP-NEXT:    sw t1, 92(a5)
+; RV64IXQCCMP-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-NEXT:    sw s1, 80(a5)
+; RV64IXQCCMP-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-NEXT:    ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 160
+;
+; RV32IXQCCMP-FP-LABEL: callee_no_irq:
+; RV32IXQCCMP-FP:       # %bb.0:
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -80
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 80
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    lui t1, %hi(var_test_irq)
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+4)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+8)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+12)(t1)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    addi a5, t1, %lo(var_test_irq)
+; RV32IXQCCMP-FP-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw a0, 24(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-FP-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t4, 80(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-FP-NEXT:    lw s1, 92(a5)
+; RV32IXQCCMP-FP-NEXT:    lw t0, 112(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a4, 116(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a7, 96(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a6, 100(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a4, 116(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t0, 112(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a6, 100(a5)
+; RV32IXQCCMP-FP-NEXT:    sw a7, 96(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s1, 92(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t4, 80(a5)
+; RV32IXQCCMP-FP-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-FP-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-FP-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, 24(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+12)(t1)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+8)(t1)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+4)(t1)
+; RV32IXQCCMP-FP-NEXT:    lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq)(t1)
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 80
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 80
+;
+; RV64IXQCCMP-FP-LABEL: callee_no_irq:
+; RV64IXQCCMP-FP:       # %bb.0:
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -160
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    lui t1, %hi(var_test_irq)
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -112(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+4)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -120(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+8)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -128(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    addi a5, t1, %lo(var_test_irq)
+; RV64IXQCCMP-FP-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -144(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -152(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT:    sd a0, -160(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-FP-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t4, 80(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-FP-NEXT:    lw s1, 92(a5)
+; RV64IXQCCMP-FP-NEXT:    lw t0, 112(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a4, 116(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a7, 96(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a6, 100(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-FP-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a4, 116(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t0, 112(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a6, 100(a5)
+; RV64IXQCCMP-FP-NEXT:    sw a7, 96(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s1, 92(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t4, 80(a5)
+; RV64IXQCCMP-FP-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-FP-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-FP-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+8)(t1)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq+4)(t1)
+; RV64IXQCCMP-FP-NEXT:    ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT:    sw a0, %lo(var_test_irq)(t1)
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 160
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 160
+;
+; RV32IXQCCMP-SR-LABEL: callee_no_irq:
+; RV32IXQCCMP-SR:       # %bb.0:
+; RV32IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s11}, -80
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 80
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s2, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s3, -20
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s4, -24
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s5, -28
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s6, -32
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s7, -36
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s8, -40
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s9, -44
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-SR-NEXT:    lui t0, %hi(var_test_irq)
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 16(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw a0, 20(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT:    lw t4, 24(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t5, 28(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t6, 32(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s2, 36(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s3, 40(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s4, 44(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s5, 48(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s6, 52(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s7, 56(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s8, 60(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s9, 64(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s10, 68(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s11, 72(a5)
+; RV32IXQCCMP-SR-NEXT:    lw ra, 76(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s1, 80(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t3, 84(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t2, 88(a5)
+; RV32IXQCCMP-SR-NEXT:    lw t1, 92(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a7, 112(a5)
+; RV32IXQCCMP-SR-NEXT:    lw s0, 116(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a3, 120(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 124(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a6, 96(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a4, 100(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a2, 104(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a1, 108(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a0, 124(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a3, 120(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s0, 116(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a7, 112(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a1, 108(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a2, 104(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a4, 100(a5)
+; RV32IXQCCMP-SR-NEXT:    sw a6, 96(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t1, 92(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t2, 88(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t3, 84(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s1, 80(a5)
+; RV32IXQCCMP-SR-NEXT:    sw ra, 76(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s11, 72(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s10, 68(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s9, 64(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s8, 60(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s7, 56(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s6, 52(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s5, 48(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s4, 44(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s3, 40(a5)
+; RV32IXQCCMP-SR-NEXT:    sw s2, 36(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t6, 32(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t5, 28(a5)
+; RV32IXQCCMP-SR-NEXT:    sw t4, 24(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, 20(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, 16(a5)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s11}, 80
+;
+; RV64IXQCCMP-SR-LABEL: callee_no_irq:
+; RV64IXQCCMP-SR:       # %bb.0:
+; RV64IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s11}, -160
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 160
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s2, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s3, -40
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s4, -48
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s5, -56
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s6, -64
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s7, -72
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s8, -80
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s9, -88
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-SR-NEXT:    lui t0, %hi(var_test_irq)
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    addi a5, t0, %lo(var_test_irq)
+; RV64IXQCCMP-SR-NEXT:    lw a0, 16(a5)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw a0, 20(a5)
+; RV64IXQCCMP-SR-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT:    lw t4, 24(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t5, 28(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t6, 32(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s2, 36(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s3, 40(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s4, 44(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s5, 48(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s6, 52(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s7, 56(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s8, 60(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s9, 64(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s10, 68(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s11, 72(a5)
+; RV64IXQCCMP-SR-NEXT:    lw ra, 76(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s1, 80(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t3, 84(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t2, 88(a5)
+; RV64IXQCCMP-SR-NEXT:    lw t1, 92(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a7, 112(a5)
+; RV64IXQCCMP-SR-NEXT:    lw s0, 116(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a3, 120(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a0, 124(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a6, 96(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a4, 100(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a2, 104(a5)
+; RV64IXQCCMP-SR-NEXT:    lw a1, 108(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a0, 124(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a3, 120(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s0, 116(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a7, 112(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a1, 108(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a2, 104(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a4, 100(a5)
+; RV64IXQCCMP-SR-NEXT:    sw a6, 96(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t1, 92(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t2, 88(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t3, 84(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s1, 80(a5)
+; RV64IXQCCMP-SR-NEXT:    sw ra, 76(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s11, 72(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s10, 68(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s9, 64(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s8, 60(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s7, 56(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s6, 52(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s5, 48(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s4, 44(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s3, 40(a5)
+; RV64IXQCCMP-SR-NEXT:    sw s2, 36(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t6, 32(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t5, 28(a5)
+; RV64IXQCCMP-SR-NEXT:    sw t4, 24(a5)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, 20(a5)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, 16(a5)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-SR-NEXT:    ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT:    sw a0, %lo(var_test_irq)(t0)
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s11}, 160
+  %val = load [32 x i32], ptr @var_test_irq
+  store volatile [32 x i32] %val, ptr @var_test_irq
+  ret void
+}
+
+declare void @bar(ptr, ptr)
+declare ptr @llvm.frameaddress.p0(i32 immarg)
+
+define i32 @use_fp(i32 %x) {
+; RV32IXQCCMP-LABEL: use_fp:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -16
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-NEXT:    mv s1, a0
+; RV32IXQCCMP-NEXT:    addi a1, s0, -16
+; RV32IXQCCMP-NEXT:    mv a0, s0
+; RV32IXQCCMP-NEXT:    call bar
+; RV32IXQCCMP-NEXT:    mv a0, s1
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+;
+; RV64IXQCCMP-LABEL: use_fp:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -32
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 32
+; RV64IXQCCMP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-NEXT:    mv s1, a0
+; RV64IXQCCMP-NEXT:    addi a1, s0, -28
+; RV64IXQCCMP-NEXT:    mv a0, s0
+; RV64IXQCCMP-NEXT:    call bar
+; RV64IXQCCMP-NEXT:    mv a0, s1
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa sp, 32
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s1}, 32
+;
+; RV32IXQCCMP-FP-LABEL: use_fp:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -16
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    mv s1, a0
+; RV32IXQCCMP-FP-NEXT:    addi a1, s0, -16
+; RV32IXQCCMP-FP-NEXT:    mv a0, s0
+; RV32IXQCCMP-FP-NEXT:    call bar
+; RV32IXQCCMP-FP-NEXT:    mv a0, s1
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+;
+; RV64IXQCCMP-FP-LABEL: use_fp:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s1}, -32
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 32
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    mv s1, a0
+; RV64IXQCCMP-FP-NEXT:    addi a1, s0, -28
+; RV64IXQCCMP-FP-NEXT:    mv a0, s0
+; RV64IXQCCMP-FP-NEXT:    call bar
+; RV64IXQCCMP-FP-NEXT:    mv a0, s1
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 32
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s1}, 32
+;
+; RV32IXQCCMP-SR-LABEL: use_fp:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0-s1}, -16
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s1, -12
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-SR-NEXT:    mv s1, a0
+; RV32IXQCCMP-SR-NEXT:    addi a1, s0, -16
+; RV32IXQCCMP-SR-NEXT:    mv a0, s0
+; RV32IXQCCMP-SR-NEXT:    call bar
+; RV32IXQCCMP-SR-NEXT:    mv a0, s1
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 16
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+;
+; RV64IXQCCMP-SR-LABEL: use_fp:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.pushfp {ra, s0-s1}, -32
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 32
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s1, -24
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-SR-NEXT:    mv s1, a0
+; RV64IXQCCMP-SR-NEXT:    addi a1, s0, -28
+; RV64IXQCCMP-SR-NEXT:    mv a0, s0
+; RV64IXQCCMP-SR-NEXT:    call bar
+; RV64IXQCCMP-SR-NEXT:    mv a0, s1
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa sp, 32
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s1}, 32
+entry:
+  %var = alloca i32, align 4
+  %0 = tail call ptr @llvm.frameaddress.p0(i32 0)
+  call void @bar(ptr %0, ptr %var)
+  ret i32 %x
+}
+
+define void @spill_x10() {
+; RV32IXQCCMP-LABEL: spill_x10:
+; RV32IXQCCMP:       # %bb.0: # %entry
+; RV32IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -64
+; RV32IXQCCMP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-NEXT:    #APP
+; RV32IXQCCMP-NEXT:    li s10, 0
+; RV32IXQCCMP-NEXT:    #NO_APP
+; RV32IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 64
+;
+; RV64IXQCCMP-LABEL: spill_x10:
+; RV64IXQCCMP:       # %bb.0: # %entry
+; RV64IXQCCMP-NEXT:    qc.cm.push {ra, s0-s11}, -112
+; RV64IXQCCMP-NEXT:    .cfi_def_cfa_offset 112
+; RV64IXQCCMP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-NEXT:    #APP
+; RV64IXQCCMP-NEXT:    li s10, 0
+; RV64IXQCCMP-NEXT:    #NO_APP
+; RV64IXQCCMP-NEXT:    qc.cm.popret {ra, s0-s11}, 112
+;
+; RV32IXQCCMP-FP-LABEL: spill_x10:
+; RV32IXQCCMP-FP:       # %bb.0: # %entry
+; RV32IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -64
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset ra, -4
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s0, -8
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-FP-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV32IXQCCMP-FP-NEXT:    #APP
+; RV32IXQCCMP-FP-NEXT:    li s10, 0
+; RV32IXQCCMP-FP-NEXT:    #NO_APP
+; RV32IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 64
+; RV32IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 64
+;
+; RV64IXQCCMP-FP-LABEL: spill_x10:
+; RV64IXQCCMP-FP:       # %bb.0: # %entry
+; RV64IXQCCMP-FP-NEXT:    qc.cm.pushfp {ra, s0-s11}, -112
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa_offset 112
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset ra, -8
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s0, -16
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-FP-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa s0, 0
+; RV64IXQCCMP-FP-NEXT:    #APP
+; RV64IXQCCMP-FP-NEXT:    li s10, 0
+; RV64IXQCCMP-FP-NEXT:    #NO_APP
+; RV64IXQCCMP-FP-NEXT:    .cfi_def_cfa sp, 112
+; RV64IXQCCMP-FP-NEXT:    qc.cm.popret {ra, s0-s11}, 112
+;
+; RV32IXQCCMP-SR-LABEL: spill_x10:
+; RV32IXQCCMP-SR:       # %bb.0: # %entry
+; RV32IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s11}, -64
+; RV32IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 64
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s10, -48
+; RV32IXQCCMP-SR-NEXT:    .cfi_offset s11, -52
+; RV32IXQCCMP-SR-NEXT:    #APP
+; RV32IXQCCMP-SR-NEXT:    li s10, 0
+; RV32IXQCCMP-SR-NEXT:    #NO_APP
+; RV32IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s11}, 64
+;
+; RV64IXQCCMP-SR-LABEL: spill_x10:
+; RV64IXQCCMP-SR:       # %bb.0: # %entry
+; RV64IXQCCMP-SR-NEXT:    qc.cm.push {ra, s0-s11}, -112
+; RV64IXQCCMP-SR-NEXT:    .cfi_def_cfa_offset 112
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s10, -96
+; RV64IXQCCMP-SR-NEXT:    .cfi_offset s11, -104
+; RV64IXQCCMP-SR-NEXT:    #APP
+; RV64IXQCCMP-SR-NEXT:    li s10, 0
+; RV64IXQCCMP-SR-NEXT:    #NO_APP
+; RV64IXQCCMP-SR-NEXT:    qc.cm.popret {ra, s0-s11}, 112
+entry:
+  tail call void asm sideeffect "li s10, 0", "~{s10}"()
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/xqccmp-with-float.ll b/llvm/test/CodeGen/RISCV/xqccmp-with-float.ll
new file mode 100644
index 0000000000000..f4f3c31f9fb3a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqccmp-with-float.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+f,+experimental-xqccmp -target-abi ilp32f -verify-machineinstrs < %s | FileCheck %s --check-prefix=XQCCMP32
+; RUN: llc -mtriple=riscv64 -mattr=+f,+experimental-xqccmp -target-abi lp64f -verify-machineinstrs < %s | FileCheck %s --check-prefix=XQCCMP64
+
+declare void @callee()
+
+; Test the file could be compiled successfully.
+define float @foo(float %arg) {
+; XQCCMP32-LABEL: foo:
+; XQCCMP32:       # %bb.0: # %entry
+; XQCCMP32-NEXT:    qc.cm.push {ra}, -16
+; XQCCMP32-NEXT:    .cfi_def_cfa_offset 16
+; XQCCMP32-NEXT:    .cfi_offset ra, -4
+; XQCCMP32-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
+; XQCCMP32-NEXT:    .cfi_offset fs0, -8
+; XQCCMP32-NEXT:    fmv.s fs0, fa0
+; XQCCMP32-NEXT:    call callee
+; XQCCMP32-NEXT:    fmv.s fa0, fs0
+; XQCCMP32-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
+; XQCCMP32-NEXT:    .cfi_restore fs0
+; XQCCMP32-NEXT:    qc.cm.popret {ra}, 16
+;
+; XQCCMP64-LABEL: foo:
+; XQCCMP64:       # %bb.0: # %entry
+; XQCCMP64-NEXT:    qc.cm.push {ra}, -16
+; XQCCMP64-NEXT:    .cfi_def_cfa_offset 16
+; XQCCMP64-NEXT:    .cfi_offset ra, -8
+; XQCCMP64-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
+; XQCCMP64-NEXT:    .cfi_offset fs0, -12
+; XQCCMP64-NEXT:    fmv.s fs0, fa0
+; XQCCMP64-NEXT:    call callee
+; XQCCMP64-NEXT:    fmv.s fa0, fs0
+; XQCCMP64-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
+; XQCCMP64-NEXT:    .cfi_restore fs0
+; XQCCMP64-NEXT:    qc.cm.popret {ra}, 16
+entry:
+  call void @callee()
+  ret float %arg
+}
+
+define void @foo2(i32 %x, float %y) {
+; XQCCMP32-LABEL: foo2:
+; XQCCMP32:       # %bb.0: # %entry
+; XQCCMP32-NEXT:    qc.cm.push {ra, s0}, -16
+; XQCCMP32-NEXT:    .cfi_def_cfa_offset 16
+; XQCCMP32-NEXT:    .cfi_offset ra, -4
+; XQCCMP32-NEXT:    .cfi_offset s0, -8
+; XQCCMP32-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
+; XQCCMP32-NEXT:    .cfi_offset fs0, -12
+; XQCCMP32-NEXT:    fmv.s fs0, fa0
+; XQCCMP32-NEXT:    mv s0, a0
+; XQCCMP32-NEXT:    call bar
+; XQCCMP32-NEXT:    mv a0, s0
+; XQCCMP32-NEXT:    fmv.s fa0, fs0
+; XQCCMP32-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
+; XQCCMP32-NEXT:    .cfi_restore fs0
+; XQCCMP32-NEXT:    qc.cm.pop {ra, s0}, 16
+; XQCCMP32-NEXT:    .cfi_restore ra
+; XQCCMP32-NEXT:    .cfi_restore s0
+; XQCCMP32-NEXT:    .cfi_def_cfa_offset 0
+; XQCCMP32-NEXT:    tail func
+;
+; XQCCMP64-LABEL: foo2:
+; XQCCMP64:       # %bb.0: # %entry
+; XQCCMP64-NEXT:    qc.cm.push {ra, s0}, -32
+; XQCCMP64-NEXT:    .cfi_def_cfa_offset 32
+; XQCCMP64-NEXT:    .cfi_offset ra, -8
+; XQCCMP64-NEXT:    .cfi_offset s0, -16
+; XQCCMP64-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
+; XQCCMP64-NEXT:    .cfi_offset fs0, -20
+; XQCCMP64-NEXT:    fmv.s fs0, fa0
+; XQCCMP64-NEXT:    mv s0, a0
+; XQCCMP64-NEXT:    call bar
+; XQCCMP64-NEXT:    mv a0, s0
+; XQCCMP64-NEXT:    fmv.s fa0, fs0
+; XQCCMP64-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
+; XQCCMP64-NEXT:    .cfi_restore fs0
+; XQCCMP64-NEXT:    qc.cm.pop {ra, s0}, 32
+; XQCCMP64-NEXT:    .cfi_restore ra
+; XQCCMP64-NEXT:    .cfi_restore s0
+; XQCCMP64-NEXT:    .cfi_def_cfa_offset 0
+; XQCCMP64-NEXT:    tail func
+entry:
+  tail call void @bar()
+  tail call void @func(i32 %x, float %y)
+  ret void
+}
+
+declare void @bar()
+declare void @func(i32, float)

diff  --git a/llvm/test/CodeGen/RISCV/xqccmp_mvas_mvsa.mir b/llvm/test/CodeGen/RISCV/xqccmp_mvas_mvsa.mir
new file mode 100644
index 0000000000000..a4bff254b95ba
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xqccmp_mvas_mvsa.mir
@@ -0,0 +1,28 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -verify-machineinstrs -run-pass=riscv-move-merge -simplify-mir -o - %s | FileCheck -check-prefixes=CHECK32XQCCMP %s
+# RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -verify-machineinstrs -run-pass=riscv-move-merge -simplify-mir -o - %s | FileCheck -check-prefixes=CHECK64XQCCMP %s
+---
+name: mv
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x11, $x10
+    ; CHECK32XQCCMP-LABEL: name: mv
+    ; CHECK32XQCCMP: liveins: $x11, $x10
+    ; CHECK32XQCCMP-NEXT: {{  $}}
+    ; CHECK32XQCCMP-NEXT: $x9, $x8 = QC_CM_MVSA01 implicit $x10, implicit $x11
+    ; CHECK32XQCCMP-NEXT: QC_CM_MVA01S killed $x9, $x8, implicit-def $x10, implicit-def $x11
+    ; CHECK32XQCCMP-NEXT: PseudoRET
+    ;
+    ; CHECK64XQCCMP-LABEL: name: mv
+    ; CHECK64XQCCMP: liveins: $x11, $x10
+    ; CHECK64XQCCMP-NEXT: {{  $}}
+    ; CHECK64XQCCMP-NEXT: $x9, $x8 = QC_CM_MVSA01 implicit $x10, implicit $x11
+    ; CHECK64XQCCMP-NEXT: QC_CM_MVA01S killed $x9, $x8, implicit-def $x10, implicit-def $x11
+    ; CHECK64XQCCMP-NEXT: PseudoRET
+    $x8 = ADDI $x11, 0
+    $x9 = ADDI $x10, 0
+    $x10 = ADDI killed $x9, 0
+    $x11 = ADDI $x8, 0
+    PseudoRET
+...

diff  --git a/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir b/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
index ba2a333f6c9ba..f4465bbaacf75 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
+++ b/llvm/test/CodeGen/RISCV/zcmp-cm-popretz.mir
@@ -25,7 +25,7 @@ body:                   |
     ; CHECK-ZCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -4
     ; CHECK-ZCMP32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP32-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP32-NEXT: CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP32-NEXT: frame-destroy CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
     ;
     ; CHECK-LIBCALL32-LABEL: name: popret_rvlist5
     ; CHECK-LIBCALL32: liveins: $x1, $x8
@@ -47,7 +47,7 @@ body:                   |
     ; CHECK-ZCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
     ; CHECK-ZCMP64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP64-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP64-NEXT: CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP64-NEXT: frame-destroy CM_POPRET 5, 0, implicit-def $x2, implicit $x2, implicit-def $x1, implicit-def $x8
     ;
     ; CHECK-LIBCALL64-LABEL: name: popret_rvlist5
     ; CHECK-LIBCALL64: liveins: $x1, $x8
@@ -115,7 +115,7 @@ body:                   |
     ; CHECK-ZCMP32-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -4
     ; CHECK-ZCMP32-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP32-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP32-NEXT: CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP32-NEXT: frame-destroy CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
     ;
     ; CHECK-LIBCALL32-LABEL: name: popretz_rvlist5
     ; CHECK-LIBCALL32: liveins: $x1, $x8
@@ -138,7 +138,7 @@ body:                   |
     ; CHECK-ZCMP64-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -8
     ; CHECK-ZCMP64-NEXT: $x1 = IMPLICIT_DEF
     ; CHECK-ZCMP64-NEXT: $x8 = IMPLICIT_DEF
-    ; CHECK-ZCMP64-NEXT: CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
+    ; CHECK-ZCMP64-NEXT: frame-destroy CM_POPRETZ 5, 0, implicit-def $x2, implicit-def $x10, implicit $x2, implicit-def $x1, implicit-def $x8
     ;
     ; CHECK-LIBCALL64-LABEL: name: popretz_rvlist5
     ; CHECK-LIBCALL64: liveins: $x1, $x8


        


More information about the llvm-commits mailing list