[llvm] 6269ed2 - [RISCV] Readjusting the framestack for Zcmp
via llvm-commits
llvm-commits@lists.llvm.org
Thu Jul 6 20:24:26 PDT 2023
Author: WuXinlong
Date: 2023-07-07T11:24:21+08:00
New Revision: 6269ed24cf15c539e69f07988ad8a3f6995b5327
URL: https://github.com/llvm/llvm-project/commit/6269ed24cf15c539e69f07988ad8a3f6995b5327
DIFF: https://github.com/llvm/llvm-project/commit/6269ed24cf15c539e69f07988ad8a3f6995b5327.diff
LOG: [RISCV] Readjusting the framestack for Zcmp
This patch readjusts the stack frame for the Zcmp push (cm.push) and pop
(cm.pop) instructions, folding part of the stack allocation into their
immediate (spimm) operand.
Co-author: @Lukacma
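For illustration (taken from the RV32 caller in the updated
callee-saved-gprs.ll test below; exact frame sizes vary per function),
the prologue and epilogue now look like:

  cm.push {ra, s0-s11}, -112   # spill ra/s0-s11, allocate 112 bytes
  addi    sp, sp, -48          # allocate the rest of the frame
  ...
  addi    sp, sp, 48
  cm.pop  {ra, s0-s11}, 112    # restore ra/s0-s11, deallocate 112 bytes

cm.push/cm.pop can adjust sp only by the register-save area rounded up
to 16 bytes plus at most 48 extra bytes, so any remainder still needs a
separate addi.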
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D134599
Added:
llvm/test/CodeGen/RISCV/push-pop-popret.ll
Modified:
llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
llvm/lib/Target/RISCV/RISCVRegisterInfo.td
llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 159db5b70fdbed..ca2d9474d1edfd 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -27,6 +27,11 @@
using namespace llvm;
+static const Register AllPopRegs[] = {
+ RISCV::X1, RISCV::X8, RISCV::X9, RISCV::X18, RISCV::X19,
+ RISCV::X20, RISCV::X21, RISCV::X22, RISCV::X23, RISCV::X24,
+ RISCV::X25, RISCV::X26, RISCV::X27};
+
// For now we use x3, a.k.a gp, as pointer to shadow call stack.
// User should not use x3 in their asm.
static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
@@ -221,6 +226,71 @@ getRestoreLibCallName(const MachineFunction &MF,
return RestoreLibCalls[LibCallID];
}
+// Return the encoded value for a PUSH/POP instruction, representing the
+// registers to store/load.
+static unsigned getPushPopEncoding(const Register MaxReg) {
+ switch (MaxReg) {
+ default:
+ llvm_unreachable("Unexpected Reg for Push/Pop Inst");
+ case RISCV::X27: /*s11*/
+ case RISCV::X26: /*s10*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S11;
+ case RISCV::X25: /*s9*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S9;
+ case RISCV::X24: /*s8*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S8;
+ case RISCV::X23: /*s7*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S7;
+ case RISCV::X22: /*s6*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S6;
+ case RISCV::X21: /*s5*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S5;
+ case RISCV::X20: /*s4*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S4;
+ case RISCV::X19: /*s3*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S3;
+ case RISCV::X18: /*s2*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S2;
+ case RISCV::X9: /*s1*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0_S1;
+ case RISCV::X8: /*s0*/
+ return llvm::RISCVZC::RLISTENCODE::RA_S0;
+ case RISCV::X1: /*ra*/
+ return llvm::RISCVZC::RLISTENCODE::RA;
+ }
+}
+
+// Get the maximum register of Push/Pop for saving/restoring callee-saved registers.
+static Register getMaxPushPopReg(const MachineFunction &MF,
+ const std::vector<CalleeSavedInfo> &CSI,
+ unsigned &PushPopRegs) {
+ Register MaxPushPopReg = RISCV::NoRegister;
+ PushPopRegs = 0;
+ for (auto &CS : CSI) {
+ Register Reg = CS.getReg();
+ if (RISCV::PGPRRegClass.contains(Reg)) {
+ MaxPushPopReg = std::max(MaxPushPopReg.id(), Reg.id());
+ PushPopRegs += 1;
+ }
+ }
+ // If rlist is {ra, s0-s10}, then s11 will also be included
+ if (MaxPushPopReg == RISCV::X26) {
+ MaxPushPopReg = RISCV::X27;
+ PushPopRegs = 13;
+ }
+ return MaxPushPopReg;
+}
+
+static uint64_t adjSPInPushPop(MachineBasicBlock::iterator MBBI,
+ unsigned RequiredStack, unsigned FreePushStack,
+ bool IsPop) {
+ if (FreePushStack > RequiredStack)
+ RequiredStack = 0;
+ unsigned Spimm = std::min(RequiredStack, 48u);
+ MBBI->getOperand(1).setImm(Spimm);
+ return alignTo(RequiredStack - Spimm, 16);
+}
+
// Return true if the specified function should have a dedicated frame
// pointer register. This is true if frame pointer elimination is
// disabled, if it needs dynamic stack realignment, if the function has
@@ -295,8 +365,8 @@ static Register getFPReg(const RISCVSubtarget &STI) { return RISCV::X8; }
static Register getSPReg(const RISCVSubtarget &STI) { return RISCV::X2; }
static SmallVector<CalleeSavedInfo, 8>
-getNonLibcallCSI(const MachineFunction &MF,
- const std::vector<CalleeSavedInfo> &CSI) {
+getUnmanagedCSI(const MachineFunction &MF,
+ const std::vector<CalleeSavedInfo> &CSI) {
const MachineFrameInfo &MFI = MF.getFrameInfo();
SmallVector<CalleeSavedInfo, 8> NonLibcallCSI;
@@ -412,6 +482,8 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// Emit prologue for shadow call stack.
emitSCSPrologue(MF, MBB, MBBI, DL);
+ auto FirstFrameSetup = MBBI;
+
// Since spillCalleeSavedRegisters may have inserted a libcall, skip past
// any instructions marked as FrameSetup
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
@@ -448,7 +520,8 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// FIXME (note copied from Lanai): This appears to be overallocating. Needs
// investigation. Get the number of bytes to allocate from the FrameInfo.
uint64_t StackSize = getStackSizeWithRVVPadding(MF);
- uint64_t RealStackSize = StackSize + RVFI->getLibCallStackSize();
+ uint64_t RealStackSize =
+ StackSize + RVFI->getLibCallStackSize() + RVFI->getRVPushStackSize();
uint64_t RVVStackSize = RVFI->getRVVStackSize();
// Early exit if there is no need to allocate on the stack
@@ -468,9 +541,21 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
RealStackSize = FirstSPAdjustAmount;
}
- // Allocate space on the stack if necessary.
- RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-StackSize),
- MachineInstr::FrameSetup, getStackAlign());
+ if (RVFI->isPushable(MF) && FirstFrameSetup->getOpcode() == RISCV::CM_PUSH) {
+ // Use the available stack adjustment in the push instruction to allocate
+ // additional stack space.
+ unsigned PushStack = RVFI->getRVPushRegs() * (STI.getXLen() / 8);
+ unsigned SpImmBase = RVFI->getRVPushStackSize();
+ StackSize = adjSPInPushPop(FirstFrameSetup, StackSize,
+ (SpImmBase - PushStack), true);
+ }
+
+ if (StackSize != 0) {
+ // Allocate space on the stack if necessary.
+ RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
+ StackOffset::getFixed(-StackSize), MachineInstr::FrameSetup,
+ getStackAlign());
+ }
// Emit ".cfi_def_cfa_offset RealStackSize"
unsigned CFIIndex = MF.addFrameInst(
@@ -487,7 +572,7 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// to the stack, not before.
// FIXME: assumes exactly one instruction is used to save each callee-saved
// register.
- std::advance(MBBI, getNonLibcallCSI(MF, CSI).size());
+ std::advance(MBBI, getUnmanagedCSI(MF, CSI).size());
// Iterate over list of callee-saved registers and emit .cfi_offset
// directives.
@@ -634,7 +719,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
--MBBI;
}
- const auto &CSI = getNonLibcallCSI(MF, MFI.getCalleeSavedInfo());
+ const auto &CSI = getUnmanagedCSI(MF, MFI.getCalleeSavedInfo());
// Skip to before the restores of callee-saved registers
// FIXME: assumes exactly one instruction is used to restore each
@@ -644,7 +729,8 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
LastFrameDestroy = std::prev(MBBI, CSI.size());
uint64_t StackSize = getStackSizeWithRVVPadding(MF);
- uint64_t RealStackSize = StackSize + RVFI->getLibCallStackSize();
+ uint64_t RealStackSize =
+ StackSize + RVFI->getLibCallStackSize() + RVFI->getRVPushStackSize();
uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
uint64_t RVVStackSize = RVFI->getRVVStackSize();
@@ -685,9 +771,19 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
if (FirstSPAdjustAmount)
StackSize = FirstSPAdjustAmount;
+ if (RVFI->isPushable(MF) && MBBI->getOpcode() == RISCV::CM_POP) {
+ // Use the available stack adjustment in the pop instruction to deallocate
+ // stack space.
+ unsigned PushStack = RVFI->getRVPushRegs() * (STI.getXLen() / 8);
+ unsigned SpImmBase = RVFI->getRVPushStackSize();
+ StackSize = adjSPInPushPop(MBBI, StackSize, (SpImmBase - PushStack), true);
+ }
+
// Deallocate stack
- RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(StackSize),
- MachineInstr::FrameDestroy, getStackAlign());
+ if (StackSize != 0) {
+ RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(StackSize),
+ MachineInstr::FrameDestroy, getStackAlign());
+ }
// Emit epilogue for shadow call stack.
emitSCSEpilogue(MF, MBB, MBBI, DL);
@@ -703,7 +799,7 @@ RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
// Callee-saved registers should be referenced relative to the stack
// pointer (positive offset), otherwise use the frame pointer (negative
// offset).
- const auto &CSI = getNonLibcallCSI(MF, MFI.getCalleeSavedInfo());
+ const auto &CSI = getUnmanagedCSI(MF, MFI.getCalleeSavedInfo());
int MinCSFI = 0;
int MaxCSFI = -1;
StackOffset Offset;
@@ -852,7 +948,8 @@ RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
assert(!RI->hasStackRealignment(MF) &&
"Can't index across variable sized realign");
Offset += StackOffset::get(getStackSizeWithRVVPadding(MF) +
- RVFI->getLibCallStackSize(),
+ RVFI->getLibCallStackSize() +
+ RVFI->getRVPushStackSize(),
RVFI->getRVVStackSize());
} else {
Offset += StackOffset::getFixed(MFI.getStackSize());
@@ -860,9 +957,10 @@ RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
} else if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
// Ensure the base of the RVV stack is correctly aligned: add on the
// alignment padding.
- int ScalarLocalVarSize =
- MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
- RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
+ int ScalarLocalVarSize = MFI.getStackSize() -
+ RVFI->getCalleeSavedStackSize() -
+ RVFI->getRVPushStackSize() -
+ RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
Offset += StackOffset::get(ScalarLocalVarSize, RVFI->getRVVStackSize());
}
return Offset;
@@ -1118,7 +1216,8 @@ void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
RVFI->setBranchRelaxationScratchFrameIndex(FI);
}
- if (MFI.getCalleeSavedInfo().empty() || RVFI->useSaveRestoreLibCalls(MF)) {
+ if (MFI.getCalleeSavedInfo().empty() || RVFI->useSaveRestoreLibCalls(MF) ||
+ RVFI->isPushable(MF)) {
RVFI->setCalleeSavedStackSize(0);
return;
}
@@ -1194,7 +1293,7 @@ RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
// Disable SplitSPAdjust if save-restore libcall is used. The callee-saved
// registers will be pushed by the save-restore libcalls, so we don't have to
// split the SP adjustment in this case.
- if (RVFI->getLibCallStackSize())
+ if (RVFI->getLibCallStackSize() || RVFI->getRVPushStackSize())
return 0;
// Return the FirstSPAdjustAmount if the StackSize can not fit in a signed
@@ -1223,8 +1322,28 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
if (MI != MBB.end() && !MI->isDebugInstr())
DL = MI->getDebugLoc();
- const char *SpillLibCall = getSpillLibCallName(*MF, CSI);
- if (SpillLibCall) {
+ // Emit CM.PUSH with its base SPimm and evaluate the push stack size.
+ RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
+ if (RVFI->isPushable(*MF)) {
+ unsigned PushPopRegs = 0;
+ Register MaxReg = getMaxPushPopReg(*MF, CSI, PushPopRegs);
+ RVFI->setRVPushRegs(PushPopRegs);
+ RVFI->setRVPushStackSize(alignTo((STI.getXLen() / 8) * PushPopRegs, 16));
+
+ if (MaxReg != RISCV::NoRegister) {
+ // Use encoded number to represent registers to spill.
+ unsigned RegEnc = getPushPopEncoding(MaxReg);
+ RVFI->setRVPushRlist(RegEnc);
+ MachineInstrBuilder PushBuilder =
+ BuildMI(MBB, MI, DL, TII.get(RISCV::CM_PUSH))
+ .setMIFlag(MachineInstr::FrameSetup);
+ PushBuilder.addImm((int64_t)RegEnc);
+ PushBuilder.addImm(0);
+
+ for (unsigned i = 0; i < PushPopRegs; i++)
+ PushBuilder.addUse(AllPopRegs[i], RegState::Implicit);
+ }
+ } else if (const char *SpillLibCall = getSpillLibCallName(*MF, CSI)) {
// Add spill libcall via non-callee-saved register t0.
BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoCALLReg), RISCV::X5)
.addExternalSymbol(SpillLibCall, RISCVII::MO_CALL)
@@ -1235,9 +1354,9 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
MBB.addLiveIn(CS.getReg());
}
- // Manually spill values not spilled by libcall.
- const auto &NonLibcallCSI = getNonLibcallCSI(*MF, CSI);
- for (auto &CS : NonLibcallCSI) {
+ // Manually spill values not spilled by the libcall or Push/Pop.
+ const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
+ for (auto &CS : UnmanagedCSI) {
// Insert the spill to the stack frame.
Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
@@ -1260,14 +1379,14 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
if (MI != MBB.end() && !MI->isDebugInstr())
DL = MI->getDebugLoc();
- // Manually restore values not restored by libcall.
+ // Manually restore values not restored by the libcall or Push/Pop.
// Keep the same order as in the prologue. There is no need to reverse the
// order in the epilogue. In addition, the return address will be restored
// first in the epilogue. It increases the opportunity to avoid the
// load-to-use data hazard between loading RA and return by RA.
// loadRegFromStackSlot can insert multiple instructions.
- const auto &NonLibcallCSI = getNonLibcallCSI(*MF, CSI);
- for (auto &CS : NonLibcallCSI) {
+ const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
+ for (auto &CS : UnmanagedCSI) {
Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
@@ -1275,22 +1394,37 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
}
- const char *RestoreLibCall = getRestoreLibCallName(*MF, CSI);
- if (RestoreLibCall) {
- // Add restore libcall via tail call.
- MachineBasicBlock::iterator NewMI =
- BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoTAIL))
- .addExternalSymbol(RestoreLibCall, RISCVII::MO_CALL)
- .setMIFlag(MachineInstr::FrameDestroy);
-
- // Remove trailing returns, since the terminator is now a tail call to the
- // restore function.
- if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
- NewMI->copyImplicitOps(*MF, *MI);
- MI->eraseFromParent();
+ RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
+ if (RVFI->isPushable(*MF)) {
+ int RegEnc = RVFI->getRVPushRlist();
+ if (RegEnc != llvm::RISCVZC::RLISTENCODE::INVALID_RLIST) {
+ MachineInstrBuilder PopBuilder =
+ BuildMI(MBB, MI, DL, TII.get(RISCV::CM_POP))
+ .setMIFlag(MachineInstr::FrameDestroy);
+ // Use encoded number to represent registers to restore.
+ PopBuilder.addImm(RegEnc);
+ PopBuilder.addImm(0);
+
+ for (unsigned i = 0; i < RVFI->getRVPushRegs(); i++)
+ PopBuilder.addDef(AllPopRegs[i], RegState::ImplicitDefine);
+ }
+ } else {
+ const char *RestoreLibCall = getRestoreLibCallName(*MF, CSI);
+ if (RestoreLibCall) {
+ // Add restore libcall via tail call.
+ MachineBasicBlock::iterator NewMI =
+ BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoTAIL))
+ .addExternalSymbol(RestoreLibCall, RISCVII::MO_CALL)
+ .setMIFlag(MachineInstr::FrameDestroy);
+
+ // Remove trailing returns, since the terminator is now a tail call to the
+ // restore function.
+ if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
+ NewMI->copyImplicitOps(*MF, *MI);
+ MI->eraseFromParent();
+ }
}
}
-
return true;
}
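To make the spimm folding in adjSPInPushPop concrete, here is a worked
trace for the RV32 caller in the callee-saved-gprs.ll test below,
assuming a 96-byte scalar frame requirement on top of the register-save
area (the figure implied by the emitted code):

  PushStack     = 13 * (XLen / 8)       = 52   # 13 saved GPRs on RV32
  SpImmBase     = alignTo(52, 16)       = 64   # base cm.push adjustment
  FreePushStack = 64 - 52               = 12   # padding already in the base
  Spimm         = min(96, 48)           = 48   # folded into the immediate
  leftover      = alignTo(96 - 48, 16)  = 48   # emitted as addi sp, sp, -48

The printed immediate, -112, is the 64-byte base plus the 48 folded
bytes. Had FreePushStack alone covered the requirement, RequiredStack
would have been zeroed and no extra adjustment emitted.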
diff --git a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
index 1103da3cb6a0ee..d769fcb7f9c595 100644
--- a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
@@ -71,6 +71,12 @@ class RISCVMachineFunctionInfo : public MachineFunctionInfo {
/// Registers that have been sign extended from i32.
SmallVector<Register, 8> SExt32Registers;
+ /// Size of stack frame for Zcmp PUSH/POP
+ unsigned RVPushStackSize = 0;
+ unsigned RVPushRegs = 0;
+ int RVPushRlist = llvm::RISCVZC::RLISTENCODE::INVALID_RLIST;
+ bool RVPushable = false;
+
public:
RISCVMachineFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) {}
@@ -122,6 +128,21 @@ class RISCVMachineFunctionInfo : public MachineFunctionInfo {
unsigned getCalleeSavedStackSize() const { return CalleeSavedStackSize; }
void setCalleeSavedStackSize(unsigned Size) { CalleeSavedStackSize = Size; }
+ bool isPushable(const MachineFunction &MF) const {
+ return (!useSaveRestoreLibCalls(MF) &&
+ MF.getSubtarget<RISCVSubtarget>().hasStdExtZcmp() &&
+ !MF.getTarget().Options.DisableFramePointerElim(MF));
+ }
+
+ int getRVPushRlist() const { return RVPushRlist; }
+ void setRVPushRlist(int Rlist) { RVPushRlist = Rlist; }
+
+ unsigned getRVPushRegs() const { return RVPushRegs; }
+ void setRVPushRegs(unsigned Regs) { RVPushRegs = Regs; }
+
+ unsigned getRVPushStackSize() const { return RVPushStackSize; }
+ void setRVPushStackSize(unsigned Size) { RVPushStackSize = Size; }
+
void initializeBaseYamlFields(const yaml::RISCVMachineFunctionInfo &YamlMFI);
void addSExt32Register(Register Reg);
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 71c06bcdfca72d..8c7c5a2382de45 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -149,7 +149,7 @@ bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
Register Reg,
int &FrameIdx) const {
const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
- if (!RVFI->useSaveRestoreLibCalls(MF))
+ if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
return false;
const auto *FII =
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index fe29575d255dde..c8c7c02350ebcf 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -165,6 +165,15 @@ def SP : GPRRegisterClass<(add X2)>;
def SR07 : GPRRegisterClass<(add (sequence "X%u", 8, 9),
(sequence "X%u", 18, 23))>;
+// Registers saveable by the PUSH/POP instructions of the Zcmp extension
+def PGPR : RegisterClass<"RISCV", [XLenVT], 32, (add
+ (sequence "X%u", 8, 9),
+ (sequence "X%u", 18, 27),
+ X1
+ )> {
+ let RegInfos = XLenRI;
+}
+
// Floating point registers
let RegAltNameIndices = [ABIRegAltName] in {
def F0_H : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index 6cbac2b0685f79..c240469cc686de 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -9,6 +9,10 @@
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -frame-pointer=all < %s \
; RUN: | FileCheck %s -check-prefix=RV32I-WITH-FP
+; RUN: llc -mtriple=riscv32 -mattr=+zcmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32IZCMP
+; RUN: llc -mtriple=riscv32 -mattr=+zcmp -verify-machineinstrs \
+; RUN: -frame-pointer=all < %s | FileCheck %s -check-prefixes=RV32IZCMP-WITH-FP
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f -verify-machineinstrs < %s \
@@ -19,6 +23,10 @@
; RUN: | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -frame-pointer=all < %s \
; RUN: | FileCheck %s -check-prefix=RV64I-WITH-FP
+; RUN: llc -mtriple=riscv64 -mattr=+zcmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64IZCMP
+; RUN: llc -mtriple=riscv64 -mattr=+zcmp -verify-machineinstrs \
+; RUN: -frame-pointer=all < %s | FileCheck %s -check-prefixes=RV64IZCMP-WITH-FP
@var = global [32 x i32] zeroinitializer
@@ -249,6 +257,203 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: addi sp, sp, 80
; RV32I-WITH-FP-NEXT: ret
;
+; RV32IZCMP-LABEL: callee:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-NEXT: lui a7, %hi(var)
+; RV32IZCMP-NEXT: lw a0, %lo(var)(a7)
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var+4)(a7)
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var+8)(a7)
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var+12)(a7)
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: addi a5, a7, %lo(var)
+; RV32IZCMP-NEXT: lw a0, 16(a5)
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 20(a5)
+; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw t4, 24(a5)
+; RV32IZCMP-NEXT: lw t5, 28(a5)
+; RV32IZCMP-NEXT: lw t6, 32(a5)
+; RV32IZCMP-NEXT: lw s2, 36(a5)
+; RV32IZCMP-NEXT: lw s3, 40(a5)
+; RV32IZCMP-NEXT: lw s4, 44(a5)
+; RV32IZCMP-NEXT: lw s5, 48(a5)
+; RV32IZCMP-NEXT: lw s6, 52(a5)
+; RV32IZCMP-NEXT: lw s7, 56(a5)
+; RV32IZCMP-NEXT: lw s8, 60(a5)
+; RV32IZCMP-NEXT: lw s9, 64(a5)
+; RV32IZCMP-NEXT: lw s10, 68(a5)
+; RV32IZCMP-NEXT: lw s11, 72(a5)
+; RV32IZCMP-NEXT: lw ra, 76(a5)
+; RV32IZCMP-NEXT: lw s1, 80(a5)
+; RV32IZCMP-NEXT: lw t3, 84(a5)
+; RV32IZCMP-NEXT: lw t2, 88(a5)
+; RV32IZCMP-NEXT: lw t1, 92(a5)
+; RV32IZCMP-NEXT: lw t0, 96(a5)
+; RV32IZCMP-NEXT: lw s0, 100(a5)
+; RV32IZCMP-NEXT: lw a6, 104(a5)
+; RV32IZCMP-NEXT: lw a4, 108(a5)
+; RV32IZCMP-NEXT: lw a0, 124(a5)
+; RV32IZCMP-NEXT: lw a1, 120(a5)
+; RV32IZCMP-NEXT: lw a2, 116(a5)
+; RV32IZCMP-NEXT: lw a3, 112(a5)
+; RV32IZCMP-NEXT: sw a0, 124(a5)
+; RV32IZCMP-NEXT: sw a1, 120(a5)
+; RV32IZCMP-NEXT: sw a2, 116(a5)
+; RV32IZCMP-NEXT: sw a3, 112(a5)
+; RV32IZCMP-NEXT: sw a4, 108(a5)
+; RV32IZCMP-NEXT: sw a6, 104(a5)
+; RV32IZCMP-NEXT: sw s0, 100(a5)
+; RV32IZCMP-NEXT: sw t0, 96(a5)
+; RV32IZCMP-NEXT: sw t1, 92(a5)
+; RV32IZCMP-NEXT: sw t2, 88(a5)
+; RV32IZCMP-NEXT: sw t3, 84(a5)
+; RV32IZCMP-NEXT: sw s1, 80(a5)
+; RV32IZCMP-NEXT: sw ra, 76(a5)
+; RV32IZCMP-NEXT: sw s11, 72(a5)
+; RV32IZCMP-NEXT: sw s10, 68(a5)
+; RV32IZCMP-NEXT: sw s9, 64(a5)
+; RV32IZCMP-NEXT: sw s8, 60(a5)
+; RV32IZCMP-NEXT: sw s7, 56(a5)
+; RV32IZCMP-NEXT: sw s6, 52(a5)
+; RV32IZCMP-NEXT: sw s5, 48(a5)
+; RV32IZCMP-NEXT: sw s4, 44(a5)
+; RV32IZCMP-NEXT: sw s3, 40(a5)
+; RV32IZCMP-NEXT: sw s2, 36(a5)
+; RV32IZCMP-NEXT: sw t6, 32(a5)
+; RV32IZCMP-NEXT: sw t5, 28(a5)
+; RV32IZCMP-NEXT: sw t4, 24(a5)
+; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 20(a5)
+; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 16(a5)
+; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+12)(a7)
+; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+8)(a7)
+; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+4)(a7)
+; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var)(a7)
+; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 96
+; RV32IZCMP-NEXT: ret
+;
+; RV32IZCMP-WITH-FP-LABEL: callee:
+; RV32IZCMP-WITH-FP: # %bb.0:
+; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, -80
+; RV32IZCMP-WITH-FP-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: addi s0, sp, 80
+; RV32IZCMP-WITH-FP-NEXT: lui a7, %hi(var)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(a7)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(a7)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(a7)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(a7)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: addi a5, a7, %lo(var)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 16(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 20(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 24(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw t5, 28(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw t6, 32(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s2, 36(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s3, 40(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s4, 44(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s5, 48(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s6, 52(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s7, 56(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s8, 60(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s9, 64(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s10, 68(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s11, 72(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw ra, 76(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw t4, 80(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw t3, 84(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw t2, 88(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw s1, 92(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw t1, 96(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw t0, 100(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a6, 104(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a4, 108(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 124(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a1, 120(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a2, 116(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a3, 112(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 124(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a1, 120(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a2, 116(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a3, 112(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a4, 108(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw a6, 104(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw t0, 100(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw t1, 96(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s1, 92(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw t2, 88(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw t3, 84(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw t4, 80(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw ra, 76(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s11, 72(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s10, 68(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s9, 64(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s8, 60(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s7, 56(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s6, 52(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s5, 48(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s4, 44(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s3, 40(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw s2, 36(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw t6, 32(a5)
+; RV32IZCMP-WITH-FP-NEXT: sw t5, 28(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 24(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 20(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 16(a5)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(a7)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(a7)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(a7)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(a7)
+; RV32IZCMP-WITH-FP-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, 80
+; RV32IZCMP-WITH-FP-NEXT: ret
+;
; RV64I-LABEL: callee:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -160
@@ -471,6 +676,203 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: addi sp, sp, 160
; RV64I-WITH-FP-NEXT: ret
+;
+; RV64IZCMP-LABEL: callee:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: lui a7, %hi(var)
+; RV64IZCMP-NEXT: lw a0, %lo(var)(a7)
+; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+4)(a7)
+; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+8)(a7)
+; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+12)(a7)
+; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: addi a5, a7, %lo(var)
+; RV64IZCMP-NEXT: lw a0, 16(a5)
+; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 20(a5)
+; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw t4, 24(a5)
+; RV64IZCMP-NEXT: lw t5, 28(a5)
+; RV64IZCMP-NEXT: lw t6, 32(a5)
+; RV64IZCMP-NEXT: lw s2, 36(a5)
+; RV64IZCMP-NEXT: lw s3, 40(a5)
+; RV64IZCMP-NEXT: lw s4, 44(a5)
+; RV64IZCMP-NEXT: lw s5, 48(a5)
+; RV64IZCMP-NEXT: lw s6, 52(a5)
+; RV64IZCMP-NEXT: lw s7, 56(a5)
+; RV64IZCMP-NEXT: lw s8, 60(a5)
+; RV64IZCMP-NEXT: lw s9, 64(a5)
+; RV64IZCMP-NEXT: lw s10, 68(a5)
+; RV64IZCMP-NEXT: lw s11, 72(a5)
+; RV64IZCMP-NEXT: lw ra, 76(a5)
+; RV64IZCMP-NEXT: lw s1, 80(a5)
+; RV64IZCMP-NEXT: lw t3, 84(a5)
+; RV64IZCMP-NEXT: lw t2, 88(a5)
+; RV64IZCMP-NEXT: lw t1, 92(a5)
+; RV64IZCMP-NEXT: lw t0, 96(a5)
+; RV64IZCMP-NEXT: lw s0, 100(a5)
+; RV64IZCMP-NEXT: lw a6, 104(a5)
+; RV64IZCMP-NEXT: lw a4, 108(a5)
+; RV64IZCMP-NEXT: lw a0, 124(a5)
+; RV64IZCMP-NEXT: lw a1, 120(a5)
+; RV64IZCMP-NEXT: lw a2, 116(a5)
+; RV64IZCMP-NEXT: lw a3, 112(a5)
+; RV64IZCMP-NEXT: sw a0, 124(a5)
+; RV64IZCMP-NEXT: sw a1, 120(a5)
+; RV64IZCMP-NEXT: sw a2, 116(a5)
+; RV64IZCMP-NEXT: sw a3, 112(a5)
+; RV64IZCMP-NEXT: sw a4, 108(a5)
+; RV64IZCMP-NEXT: sw a6, 104(a5)
+; RV64IZCMP-NEXT: sw s0, 100(a5)
+; RV64IZCMP-NEXT: sw t0, 96(a5)
+; RV64IZCMP-NEXT: sw t1, 92(a5)
+; RV64IZCMP-NEXT: sw t2, 88(a5)
+; RV64IZCMP-NEXT: sw t3, 84(a5)
+; RV64IZCMP-NEXT: sw s1, 80(a5)
+; RV64IZCMP-NEXT: sw ra, 76(a5)
+; RV64IZCMP-NEXT: sw s11, 72(a5)
+; RV64IZCMP-NEXT: sw s10, 68(a5)
+; RV64IZCMP-NEXT: sw s9, 64(a5)
+; RV64IZCMP-NEXT: sw s8, 60(a5)
+; RV64IZCMP-NEXT: sw s7, 56(a5)
+; RV64IZCMP-NEXT: sw s6, 52(a5)
+; RV64IZCMP-NEXT: sw s5, 48(a5)
+; RV64IZCMP-NEXT: sw s4, 44(a5)
+; RV64IZCMP-NEXT: sw s3, 40(a5)
+; RV64IZCMP-NEXT: sw s2, 36(a5)
+; RV64IZCMP-NEXT: sw t6, 32(a5)
+; RV64IZCMP-NEXT: sw t5, 28(a5)
+; RV64IZCMP-NEXT: sw t4, 24(a5)
+; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 20(a5)
+; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 16(a5)
+; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var+12)(a7)
+; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var+8)(a7)
+; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var+4)(a7)
+; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var)(a7)
+; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT: ret
+;
+; RV64IZCMP-WITH-FP-LABEL: callee:
+; RV64IZCMP-WITH-FP: # %bb.0:
+; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, -160
+; RV64IZCMP-WITH-FP-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s8, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: addi s0, sp, 160
+; RV64IZCMP-WITH-FP-NEXT: lui a7, %hi(var)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(a7)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(a7)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(a7)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(a7)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: addi a5, a7, %lo(var)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 16(a5)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 20(a5)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -152(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 24(a5)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -160(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw t5, 28(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw t6, 32(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s2, 36(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s3, 40(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s4, 44(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s5, 48(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s6, 52(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s7, 56(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s8, 60(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s9, 64(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s10, 68(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s11, 72(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw ra, 76(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw t4, 80(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw t3, 84(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw t2, 88(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw s1, 92(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw t1, 96(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw t0, 100(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a6, 104(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a4, 108(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 124(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a1, 120(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a2, 116(a5)
+; RV64IZCMP-WITH-FP-NEXT: lw a3, 112(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 124(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a1, 120(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a2, 116(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a3, 112(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a4, 108(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw a6, 104(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw t0, 100(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw t1, 96(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s1, 92(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw t2, 88(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw t3, 84(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw t4, 80(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw ra, 76(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s11, 72(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s10, 68(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s9, 64(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s8, 60(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s7, 56(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s6, 52(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s5, 48(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s4, 44(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s3, 40(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw s2, 36(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw t6, 32(a5)
+; RV64IZCMP-WITH-FP-NEXT: sw t5, 28(a5)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 24(a5)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 20(a5)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 16(a5)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(a7)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(a7)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(a7)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(a7)
+; RV64IZCMP-WITH-FP-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s8, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, 160
+; RV64IZCMP-WITH-FP-NEXT: ret
%val = load [32 x i32], ptr @var
store volatile [32 x i32] %val, ptr @var
ret void
@@ -769,6 +1171,271 @@ define void @caller() nounwind {
; RV32I-WITH-FP-NEXT: addi sp, sp, 144
; RV32I-WITH-FP-NEXT: ret
;
+; RV32IZCMP-LABEL: caller:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -112
+; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: lui s0, %hi(var)
+; RV32IZCMP-NEXT: lw a0, %lo(var)(s0)
+; RV32IZCMP-NEXT: sw a0, 92(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var+4)(s0)
+; RV32IZCMP-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var+8)(s0)
+; RV32IZCMP-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var+12)(s0)
+; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: addi s1, s0, %lo(var)
+; RV32IZCMP-NEXT: lw a0, 16(s1)
+; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 20(s1)
+; RV32IZCMP-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 24(s1)
+; RV32IZCMP-NEXT: sw a0, 68(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 28(s1)
+; RV32IZCMP-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 32(s1)
+; RV32IZCMP-NEXT: sw a0, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 36(s1)
+; RV32IZCMP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 40(s1)
+; RV32IZCMP-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 44(s1)
+; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 48(s1)
+; RV32IZCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 52(s1)
+; RV32IZCMP-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 56(s1)
+; RV32IZCMP-NEXT: sw a0, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 60(s1)
+; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 64(s1)
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 68(s1)
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 72(s1)
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 76(s1)
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 80(s1)
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 84(s1)
+; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw s4, 88(s1)
+; RV32IZCMP-NEXT: lw s5, 92(s1)
+; RV32IZCMP-NEXT: lw s6, 96(s1)
+; RV32IZCMP-NEXT: lw s7, 100(s1)
+; RV32IZCMP-NEXT: lw s8, 104(s1)
+; RV32IZCMP-NEXT: lw s9, 108(s1)
+; RV32IZCMP-NEXT: lw s10, 112(s1)
+; RV32IZCMP-NEXT: lw s11, 116(s1)
+; RV32IZCMP-NEXT: lw s2, 120(s1)
+; RV32IZCMP-NEXT: lw s3, 124(s1)
+; RV32IZCMP-NEXT: call callee@plt
+; RV32IZCMP-NEXT: sw s3, 124(s1)
+; RV32IZCMP-NEXT: sw s2, 120(s1)
+; RV32IZCMP-NEXT: sw s11, 116(s1)
+; RV32IZCMP-NEXT: sw s10, 112(s1)
+; RV32IZCMP-NEXT: sw s9, 108(s1)
+; RV32IZCMP-NEXT: sw s8, 104(s1)
+; RV32IZCMP-NEXT: sw s7, 100(s1)
+; RV32IZCMP-NEXT: sw s6, 96(s1)
+; RV32IZCMP-NEXT: sw s5, 92(s1)
+; RV32IZCMP-NEXT: sw s4, 88(s1)
+; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 84(s1)
+; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 80(s1)
+; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 76(s1)
+; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 72(s1)
+; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 68(s1)
+; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 64(s1)
+; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 60(s1)
+; RV32IZCMP-NEXT: lw a0, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 56(s1)
+; RV32IZCMP-NEXT: lw a0, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 52(s1)
+; RV32IZCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 48(s1)
+; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 44(s1)
+; RV32IZCMP-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 40(s1)
+; RV32IZCMP-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 36(s1)
+; RV32IZCMP-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 32(s1)
+; RV32IZCMP-NEXT: lw a0, 64(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 28(s1)
+; RV32IZCMP-NEXT: lw a0, 68(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 24(s1)
+; RV32IZCMP-NEXT: lw a0, 72(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 20(s1)
+; RV32IZCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 16(s1)
+; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+12)(s0)
+; RV32IZCMP-NEXT: lw a0, 84(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+8)(s0)
+; RV32IZCMP-NEXT: lw a0, 88(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+4)(s0)
+; RV32IZCMP-NEXT: lw a0, 92(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var)(s0)
+; RV32IZCMP-NEXT: addi sp, sp, 48
+; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 112
+; RV32IZCMP-NEXT: ret
+;
+; RV32IZCMP-WITH-FP-LABEL: caller:
+; RV32IZCMP-WITH-FP: # %bb.0:
+; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, -144
+; RV32IZCMP-WITH-FP-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: addi s0, sp, 144
+; RV32IZCMP-WITH-FP-NEXT: lui s6, %hi(var)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(s6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(s6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(s6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(s6)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: addi s1, s6, %lo(var)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 16(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 20(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 24(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 28(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -84(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 32(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -88(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 36(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -92(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 40(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -96(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 44(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -100(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 48(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -104(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 52(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -108(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 56(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -112(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 60(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -116(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 64(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -120(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 68(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -124(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 72(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -128(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 76(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -132(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 80(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -136(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 84(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -140(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw a0, 88(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw a0, -144(s0) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: lw s8, 92(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s9, 96(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s10, 100(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s11, 104(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s2, 108(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s3, 112(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s4, 116(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s5, 120(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw s7, 124(s1)
+; RV32IZCMP-WITH-FP-NEXT: call callee@plt
+; RV32IZCMP-WITH-FP-NEXT: sw s7, 124(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s5, 120(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s4, 116(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s3, 112(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s2, 108(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s11, 104(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s10, 100(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s9, 96(s1)
+; RV32IZCMP-WITH-FP-NEXT: sw s8, 92(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -144(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 88(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -140(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 84(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -136(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 80(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -132(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 76(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -128(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 72(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -124(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 68(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -120(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 64(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -116(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 60(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -112(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 56(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -108(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 52(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -104(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 48(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -100(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 44(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -96(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 40(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -92(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 36(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -88(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 32(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -84(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 28(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 24(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 20(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, 16(s1)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(s6)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(s6)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(s6)
+; RV32IZCMP-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(s6)
+; RV32IZCMP-WITH-FP-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, 144
+; RV32IZCMP-WITH-FP-NEXT: ret
+;
; RV64I-LABEL: caller:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -288
@@ -1057,7 +1724,271 @@ define void @caller() nounwind {
; RV64I-WITH-FP-NEXT: ld s11, 184(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: addi sp, sp, 288
; RV64I-WITH-FP-NEXT: ret
-
+;
+; RV64IZCMP-LABEL: caller:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: addi sp, sp, -128
+; RV64IZCMP-NEXT: lui s0, %hi(var)
+; RV64IZCMP-NEXT: lw a0, %lo(var)(s0)
+; RV64IZCMP-NEXT: sd a0, 168(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+4)(s0)
+; RV64IZCMP-NEXT: sd a0, 160(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+8)(s0)
+; RV64IZCMP-NEXT: sd a0, 152(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+12)(s0)
+; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: addi s1, s0, %lo(var)
+; RV64IZCMP-NEXT: lw a0, 16(s1)
+; RV64IZCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 20(s1)
+; RV64IZCMP-NEXT: sd a0, 128(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 24(s1)
+; RV64IZCMP-NEXT: sd a0, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 28(s1)
+; RV64IZCMP-NEXT: sd a0, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 32(s1)
+; RV64IZCMP-NEXT: sd a0, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 36(s1)
+; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 40(s1)
+; RV64IZCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 44(s1)
+; RV64IZCMP-NEXT: sd a0, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 48(s1)
+; RV64IZCMP-NEXT: sd a0, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 52(s1)
+; RV64IZCMP-NEXT: sd a0, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 56(s1)
+; RV64IZCMP-NEXT: sd a0, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 60(s1)
+; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 64(s1)
+; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 68(s1)
+; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 72(s1)
+; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 76(s1)
+; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 80(s1)
+; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 84(s1)
+; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw s4, 88(s1)
+; RV64IZCMP-NEXT: lw s5, 92(s1)
+; RV64IZCMP-NEXT: lw s6, 96(s1)
+; RV64IZCMP-NEXT: lw s7, 100(s1)
+; RV64IZCMP-NEXT: lw s8, 104(s1)
+; RV64IZCMP-NEXT: lw s9, 108(s1)
+; RV64IZCMP-NEXT: lw s10, 112(s1)
+; RV64IZCMP-NEXT: lw s11, 116(s1)
+; RV64IZCMP-NEXT: lw s2, 120(s1)
+; RV64IZCMP-NEXT: lw s3, 124(s1)
+; RV64IZCMP-NEXT: call callee@plt
+; RV64IZCMP-NEXT: sw s3, 124(s1)
+; RV64IZCMP-NEXT: sw s2, 120(s1)
+; RV64IZCMP-NEXT: sw s11, 116(s1)
+; RV64IZCMP-NEXT: sw s10, 112(s1)
+; RV64IZCMP-NEXT: sw s9, 108(s1)
+; RV64IZCMP-NEXT: sw s8, 104(s1)
+; RV64IZCMP-NEXT: sw s7, 100(s1)
+; RV64IZCMP-NEXT: sw s6, 96(s1)
+; RV64IZCMP-NEXT: sw s5, 92(s1)
+; RV64IZCMP-NEXT: sw s4, 88(s1)
+; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 84(s1)
+; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 80(s1)
+; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 76(s1)
+; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 72(s1)
+; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 68(s1)
+; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 64(s1)
+; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 60(s1)
+; RV64IZCMP-NEXT: ld a0, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 56(s1)
+; RV64IZCMP-NEXT: ld a0, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 52(s1)
+; RV64IZCMP-NEXT: ld a0, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 48(s1)
+; RV64IZCMP-NEXT: ld a0, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 44(s1)
+; RV64IZCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 40(s1)
+; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 36(s1)
+; RV64IZCMP-NEXT: ld a0, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 32(s1)
+; RV64IZCMP-NEXT: ld a0, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 28(s1)
+; RV64IZCMP-NEXT: ld a0, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 24(s1)
+; RV64IZCMP-NEXT: ld a0, 128(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 20(s1)
+; RV64IZCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 16(s1)
+; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var+12)(s0)
+; RV64IZCMP-NEXT: ld a0, 152(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var+8)(s0)
+; RV64IZCMP-NEXT: ld a0, 160(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var+4)(s0)
+; RV64IZCMP-NEXT: ld a0, 168(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var)(s0)
+; RV64IZCMP-NEXT: addi sp, sp, 128
+; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT: ret
+;
+; RV64IZCMP-WITH-FP-LABEL: caller:
+; RV64IZCMP-WITH-FP: # %bb.0:
+; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, -288
+; RV64IZCMP-WITH-FP-NEXT: sd ra, 280(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s0, 272(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s1, 264(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s2, 256(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s3, 248(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s4, 240(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s5, 232(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s6, 224(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s7, 216(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s8, 208(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s9, 200(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s10, 192(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: sd s11, 184(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: addi s0, sp, 288
+; RV64IZCMP-WITH-FP-NEXT: lui s6, %hi(var)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(s6)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(s6)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(s6)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(s6)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: addi s1, s6, %lo(var)
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 16(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 20(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -152(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 24(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -160(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 28(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -168(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 32(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -176(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 36(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -184(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 40(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -192(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 44(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -200(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 48(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -208(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 52(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -216(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 56(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -224(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 60(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -232(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 64(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -240(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 68(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -248(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 72(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -256(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 76(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -264(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 80(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -272(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 84(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -280(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw a0, 88(s1)
+; RV64IZCMP-WITH-FP-NEXT: sd a0, -288(s0) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: lw s8, 92(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s9, 96(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s10, 100(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s11, 104(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s2, 108(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s3, 112(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s4, 116(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s5, 120(s1)
+; RV64IZCMP-WITH-FP-NEXT: lw s7, 124(s1)
+; RV64IZCMP-WITH-FP-NEXT: call callee@plt
+; RV64IZCMP-WITH-FP-NEXT: sw s7, 124(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s5, 120(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s4, 116(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s3, 112(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s2, 108(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s11, 104(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s10, 100(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s9, 96(s1)
+; RV64IZCMP-WITH-FP-NEXT: sw s8, 92(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -288(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 88(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -280(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 84(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -272(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 80(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -264(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 76(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -256(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 72(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -248(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 68(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -240(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 64(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -232(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 60(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -224(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 56(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -216(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 52(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -208(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 48(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -200(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 44(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -192(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 40(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -184(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 36(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -176(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 32(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -168(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 28(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 24(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 20(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, 16(s1)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(s6)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(s6)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(s6)
+; RV64IZCMP-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(s6)
+; RV64IZCMP-WITH-FP-NEXT: ld ra, 280(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s0, 272(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s1, 264(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s2, 256(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s3, 248(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s4, 240(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s5, 232(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s6, 224(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s7, 216(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s8, 208(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s9, 200(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s10, 192(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: ld s11, 184(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, 288
+; RV64IZCMP-WITH-FP-NEXT: ret
%val = load [32 x i32], ptr @var
call void @callee()
store volatile [32 x i32] %val, ptr @var
diff --git a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
index ff9df96516d3c8..7171277fbfd416 100644
--- a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
+++ b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
@@ -36,22 +36,14 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
;
; CHECK32ZCMP-LABEL: zcmp_mv:
; CHECK32ZCMP: # %bb.0:
-; CHECK32ZCMP-NEXT: addi sp, sp, -16
-; CHECK32ZCMP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32ZCMP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; CHECK32ZCMP-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; CHECK32ZCMP-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT: cm.push {ra, s0-s2}, -16
; CHECK32ZCMP-NEXT: cm.mvsa01 s1, s0
; CHECK32ZCMP-NEXT: call func@plt
; CHECK32ZCMP-NEXT: mv s2, a0
; CHECK32ZCMP-NEXT: cm.mva01s s1, s0
; CHECK32ZCMP-NEXT: call func@plt
; CHECK32ZCMP-NEXT: add a0, s2, s0
-; CHECK32ZCMP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK32ZCMP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; CHECK32ZCMP-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; CHECK32ZCMP-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
-; CHECK32ZCMP-NEXT: addi sp, sp, 16
+; CHECK32ZCMP-NEXT: cm.pop {ra, s0-s2}, 16
; CHECK32ZCMP-NEXT: ret
;
; CHECK64I-LABEL: zcmp_mv:
@@ -78,22 +70,14 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
;
; CHECK64ZCMP-LABEL: zcmp_mv:
; CHECK64ZCMP: # %bb.0:
-; CHECK64ZCMP-NEXT: addi sp, sp, -32
-; CHECK64ZCMP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; CHECK64ZCMP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; CHECK64ZCMP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; CHECK64ZCMP-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT: cm.push {ra, s0-s2}, -32
; CHECK64ZCMP-NEXT: cm.mvsa01 s1, s0
; CHECK64ZCMP-NEXT: call func@plt
; CHECK64ZCMP-NEXT: mv s2, a0
; CHECK64ZCMP-NEXT: cm.mva01s s1, s0
; CHECK64ZCMP-NEXT: call func@plt
; CHECK64ZCMP-NEXT: addw a0, s2, s0
-; CHECK64ZCMP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; CHECK64ZCMP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; CHECK64ZCMP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; CHECK64ZCMP-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
-; CHECK64ZCMP-NEXT: addi sp, sp, 32
+; CHECK64ZCMP-NEXT: cm.pop {ra, s0-s2}, 32
; CHECK64ZCMP-NEXT: ret
%call = call i32 @func(i32 %num, i32 %f)
%call1 = call i32 @func(i32 %num, i32 %f)
@@ -126,10 +110,7 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
;
; CHECK32ZCMP-LABEL: not_zcmp_mv:
; CHECK32ZCMP: # %bb.0:
-; CHECK32ZCMP-NEXT: addi sp, sp, -16
-; CHECK32ZCMP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32ZCMP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; CHECK32ZCMP-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK32ZCMP-NEXT: cm.push {ra, s0-s1}, -16
; CHECK32ZCMP-NEXT: mv s0, a1
; CHECK32ZCMP-NEXT: call foo@plt
; CHECK32ZCMP-NEXT: mv s1, a0
@@ -140,10 +121,7 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK32ZCMP-NEXT: li a0, 1
; CHECK32ZCMP-NEXT: mv a1, s0
; CHECK32ZCMP-NEXT: call func@plt
-; CHECK32ZCMP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK32ZCMP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; CHECK32ZCMP-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; CHECK32ZCMP-NEXT: addi sp, sp, 16
+; CHECK32ZCMP-NEXT: cm.pop {ra, s0-s1}, 16
; CHECK32ZCMP-NEXT: ret
;
; CHECK64I-LABEL: not_zcmp_mv:
@@ -170,10 +148,7 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
;
; CHECK64ZCMP-LABEL: not_zcmp_mv:
; CHECK64ZCMP: # %bb.0:
-; CHECK64ZCMP-NEXT: addi sp, sp, -32
-; CHECK64ZCMP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; CHECK64ZCMP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; CHECK64ZCMP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; CHECK64ZCMP-NEXT: cm.push {ra, s0-s1}, -32
; CHECK64ZCMP-NEXT: mv s0, a1
; CHECK64ZCMP-NEXT: call foo@plt
; CHECK64ZCMP-NEXT: mv s1, a0
@@ -184,10 +159,7 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK64ZCMP-NEXT: li a0, 1
; CHECK64ZCMP-NEXT: mv a1, s0
; CHECK64ZCMP-NEXT: call func@plt
-; CHECK64ZCMP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; CHECK64ZCMP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; CHECK64ZCMP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
-; CHECK64ZCMP-NEXT: addi sp, sp, 32
+; CHECK64ZCMP-NEXT: cm.pop {ra, s0-s1}, 32
; CHECK64ZCMP-NEXT: ret
%call = call i32 @foo(i32 %num)
%call1 = call i32 @foo(i32 %f)
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
new file mode 100644
index 00000000000000..c0e46fc7649b5c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -0,0 +1,3106 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+zcmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32IZCMP
+; RUN: llc -mtriple=riscv64 -mattr=+zcmp -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64IZCMP
+; RUN: llc -mtriple=riscv32 -mattr=+zcmp,+save-restore \
+; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV32IZCMP-SR
+; RUN: llc -mtriple=riscv64 -mattr=+zcmp,+save-restore \
+; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV64IZCMP-SR
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64I %s
+
+declare void @test(i8*)
+declare void @callee_void(i8*)
+declare i32 @callee(i8*)
+
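+; The 512-byte alloca gives a 528-byte frame; cm.push can only adjust sp by a
+; small scaled immediate, so the remaining 464 bytes come from a separate addi.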
+define i32 @foo() {
+; RV32IZCMP-LABEL: foo:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: cm.push {ra}, -64
+; RV32IZCMP-NEXT: addi sp, sp, -464
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 528
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-NEXT: mv a0, sp
+; RV32IZCMP-NEXT: call test@plt
+; RV32IZCMP-NEXT: li a0, 0
+; RV32IZCMP-NEXT: addi sp, sp, 464
+; RV32IZCMP-NEXT: cm.pop {ra}, 64
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: foo:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: cm.push {ra}, -64
+; RV64IZCMP-NEXT: addi sp, sp, -464
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 528
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-NEXT: mv a0, sp
+; RV64IZCMP-NEXT: call test@plt
+; RV64IZCMP-NEXT: li a0, 0
+; RV64IZCMP-NEXT: addi sp, sp, 464
+; RV64IZCMP-NEXT: cm.pop {ra}, 64
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: foo:
+; RV32IZCMP-SR: # %bb.0:
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_0
+; RV32IZCMP-SR-NEXT: addi sp, sp, -512
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-SR-NEXT: mv a0, sp
+; RV32IZCMP-SR-NEXT: call test@plt
+; RV32IZCMP-SR-NEXT: li a0, 0
+; RV32IZCMP-SR-NEXT: addi sp, sp, 512
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_0
+;
+; RV64IZCMP-SR-LABEL: foo:
+; RV64IZCMP-SR: # %bb.0:
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_0
+; RV64IZCMP-SR-NEXT: addi sp, sp, -512
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-SR-NEXT: mv a0, sp
+; RV64IZCMP-SR-NEXT: call test@plt
+; RV64IZCMP-SR-NEXT: li a0, 0
+; RV64IZCMP-SR-NEXT: addi sp, sp, 512
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_0
+;
+; RV32I-LABEL: foo:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -528
+; RV32I-NEXT: .cfi_def_cfa_offset 528
+; RV32I-NEXT: sw ra, 524(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: addi a0, sp, 12
+; RV32I-NEXT: call test@plt
+; RV32I-NEXT: li a0, 0
+; RV32I-NEXT: lw ra, 524(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 528
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: foo:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -528
+; RV64I-NEXT: .cfi_def_cfa_offset 528
+; RV64I-NEXT: sd ra, 520(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: addi a0, sp, 8
+; RV64I-NEXT: call test@plt
+; RV64I-NEXT: li a0, 0
+; RV64I-NEXT: ld ra, 520(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 528
+; RV64I-NEXT: ret
+ %1 = alloca [512 x i8]
+ %2 = getelementptr [512 x i8], [512 x i8]* %1, i32 0, i32 0
+ call void @test(i8* %2)
+ ret i32 0
+}
+
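+; A variable-sized alloca forces a frame pointer, so sp is restored from s0
+; before cm.pop reloads the saved registers.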
+define i32 @pushpopret0(i32 signext %size){
+; RV32IZCMP-LABEL: pushpopret0:
+; RV32IZCMP: # %bb.0: # %entry
+; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-NEXT: addi a0, a0, 15
+; RV32IZCMP-NEXT: andi a0, a0, -16
+; RV32IZCMP-NEXT: sub a0, sp, a0
+; RV32IZCMP-NEXT: mv sp, a0
+; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: li a0, 0
+; RV32IZCMP-NEXT: addi sp, s0, -16
+; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: pushpopret0:
+; RV64IZCMP: # %bb.0: # %entry
+; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-NEXT: addi s0, sp, 16
+; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: srli a0, a0, 32
+; RV64IZCMP-NEXT: addi a0, a0, 15
+; RV64IZCMP-NEXT: andi a0, a0, -16
+; RV64IZCMP-NEXT: sub a0, sp, a0
+; RV64IZCMP-NEXT: mv sp, a0
+; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: li a0, 0
+; RV64IZCMP-NEXT: addi sp, s0, -16
+; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: pushpopret0:
+; RV32IZCMP-SR: # %bb.0: # %entry
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-SR-NEXT: addi a0, a0, 15
+; RV32IZCMP-SR-NEXT: andi a0, a0, -16
+; RV32IZCMP-SR-NEXT: sub a0, sp, a0
+; RV32IZCMP-SR-NEXT: mv sp, a0
+; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: li a0, 0
+; RV32IZCMP-SR-NEXT: addi sp, s0, -16
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV64IZCMP-SR-LABEL: pushpopret0:
+; RV64IZCMP-SR: # %bb.0: # %entry
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-SR-NEXT: addi s0, sp, 16
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: srli a0, a0, 32
+; RV64IZCMP-SR-NEXT: addi a0, a0, 15
+; RV64IZCMP-SR-NEXT: andi a0, a0, -16
+; RV64IZCMP-SR-NEXT: sub a0, sp, a0
+; RV64IZCMP-SR-NEXT: mv sp, a0
+; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: li a0, 0
+; RV64IZCMP-SR-NEXT: addi sp, s0, -16
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV32I-LABEL: pushpopret0:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: .cfi_def_cfa s0, 0
+; RV32I-NEXT: addi a0, a0, 15
+; RV32I-NEXT: andi a0, a0, -16
+; RV32I-NEXT: sub a0, sp, a0
+; RV32I-NEXT: mv sp, a0
+; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: li a0, 0
+; RV32I-NEXT: addi sp, s0, -16
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: pushpopret0:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: addi s0, sp, 16
+; RV64I-NEXT: .cfi_def_cfa s0, 0
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a0, a0, 15
+; RV64I-NEXT: andi a0, a0, -16
+; RV64I-NEXT: sub a0, sp, a0
+; RV64I-NEXT: mv sp, a0
+; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: li a0, 0
+; RV64I-NEXT: addi sp, s0, -16
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+entry:
+ %0 = alloca i8, i32 %size, align 16
+ call void @callee_void(i8* nonnull %0)
+ ret i32 0
+}
+
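+; Same frame layout as pushpopret0; only the return value (1) differs.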
+define i32 @pushpopret1(i32 signext %size) {
+; RV32IZCMP-LABEL: pushpopret1:
+; RV32IZCMP: # %bb.0: # %entry
+; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-NEXT: addi a0, a0, 15
+; RV32IZCMP-NEXT: andi a0, a0, -16
+; RV32IZCMP-NEXT: sub a0, sp, a0
+; RV32IZCMP-NEXT: mv sp, a0
+; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: li a0, 1
+; RV32IZCMP-NEXT: addi sp, s0, -16
+; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: pushpopret1:
+; RV64IZCMP: # %bb.0: # %entry
+; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-NEXT: addi s0, sp, 16
+; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: srli a0, a0, 32
+; RV64IZCMP-NEXT: addi a0, a0, 15
+; RV64IZCMP-NEXT: andi a0, a0, -16
+; RV64IZCMP-NEXT: sub a0, sp, a0
+; RV64IZCMP-NEXT: mv sp, a0
+; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: li a0, 1
+; RV64IZCMP-NEXT: addi sp, s0, -16
+; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: pushpopret1:
+; RV32IZCMP-SR: # %bb.0: # %entry
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-SR-NEXT: addi a0, a0, 15
+; RV32IZCMP-SR-NEXT: andi a0, a0, -16
+; RV32IZCMP-SR-NEXT: sub a0, sp, a0
+; RV32IZCMP-SR-NEXT: mv sp, a0
+; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: li a0, 1
+; RV32IZCMP-SR-NEXT: addi sp, s0, -16
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV64IZCMP-SR-LABEL: pushpopret1:
+; RV64IZCMP-SR: # %bb.0: # %entry
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-SR-NEXT: addi s0, sp, 16
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: srli a0, a0, 32
+; RV64IZCMP-SR-NEXT: addi a0, a0, 15
+; RV64IZCMP-SR-NEXT: andi a0, a0, -16
+; RV64IZCMP-SR-NEXT: sub a0, sp, a0
+; RV64IZCMP-SR-NEXT: mv sp, a0
+; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: li a0, 1
+; RV64IZCMP-SR-NEXT: addi sp, s0, -16
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV32I-LABEL: pushpopret1:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: .cfi_def_cfa s0, 0
+; RV32I-NEXT: addi a0, a0, 15
+; RV32I-NEXT: andi a0, a0, -16
+; RV32I-NEXT: sub a0, sp, a0
+; RV32I-NEXT: mv sp, a0
+; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: li a0, 1
+; RV32I-NEXT: addi sp, s0, -16
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: pushpopret1:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: addi s0, sp, 16
+; RV64I-NEXT: .cfi_def_cfa s0, 0
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a0, a0, 15
+; RV64I-NEXT: andi a0, a0, -16
+; RV64I-NEXT: sub a0, sp, a0
+; RV64I-NEXT: mv sp, a0
+; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: li a0, 1
+; RV64I-NEXT: addi sp, s0, -16
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+entry:
+ %0 = alloca i8, i32 %size, align 16
+ call void @callee_void(i8* nonnull %0)
+ ret i32 1
+}
+
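+; Same frame layout as pushpopret0; only the return value (-1) differs.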
+define i32 @pushpopretneg1(i32 signext %size) {
+; RV32IZCMP-LABEL: pushpopretneg1:
+; RV32IZCMP: # %bb.0: # %entry
+; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-NEXT: addi a0, a0, 15
+; RV32IZCMP-NEXT: andi a0, a0, -16
+; RV32IZCMP-NEXT: sub a0, sp, a0
+; RV32IZCMP-NEXT: mv sp, a0
+; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: li a0, -1
+; RV32IZCMP-NEXT: addi sp, s0, -16
+; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: pushpopretneg1:
+; RV64IZCMP: # %bb.0: # %entry
+; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-NEXT: addi s0, sp, 16
+; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: srli a0, a0, 32
+; RV64IZCMP-NEXT: addi a0, a0, 15
+; RV64IZCMP-NEXT: andi a0, a0, -16
+; RV64IZCMP-NEXT: sub a0, sp, a0
+; RV64IZCMP-NEXT: mv sp, a0
+; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: li a0, -1
+; RV64IZCMP-NEXT: addi sp, s0, -16
+; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: pushpopretneg1:
+; RV32IZCMP-SR: # %bb.0: # %entry
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-SR-NEXT: addi a0, a0, 15
+; RV32IZCMP-SR-NEXT: andi a0, a0, -16
+; RV32IZCMP-SR-NEXT: sub a0, sp, a0
+; RV32IZCMP-SR-NEXT: mv sp, a0
+; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: li a0, -1
+; RV32IZCMP-SR-NEXT: addi sp, s0, -16
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV64IZCMP-SR-LABEL: pushpopretneg1:
+; RV64IZCMP-SR: # %bb.0: # %entry
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-SR-NEXT: addi s0, sp, 16
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: srli a0, a0, 32
+; RV64IZCMP-SR-NEXT: addi a0, a0, 15
+; RV64IZCMP-SR-NEXT: andi a0, a0, -16
+; RV64IZCMP-SR-NEXT: sub a0, sp, a0
+; RV64IZCMP-SR-NEXT: mv sp, a0
+; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: li a0, -1
+; RV64IZCMP-SR-NEXT: addi sp, s0, -16
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV32I-LABEL: pushpopretneg1:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: .cfi_def_cfa s0, 0
+; RV32I-NEXT: addi a0, a0, 15
+; RV32I-NEXT: andi a0, a0, -16
+; RV32I-NEXT: sub a0, sp, a0
+; RV32I-NEXT: mv sp, a0
+; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: li a0, -1
+; RV32I-NEXT: addi sp, s0, -16
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: pushpopretneg1:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: addi s0, sp, 16
+; RV64I-NEXT: .cfi_def_cfa s0, 0
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a0, a0, 15
+; RV64I-NEXT: andi a0, a0, -16
+; RV64I-NEXT: sub a0, sp, a0
+; RV64I-NEXT: mv sp, a0
+; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: li a0, -1
+; RV64I-NEXT: addi sp, s0, -16
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+entry:
+ %0 = alloca i8, i32 %size, align 16
+ call void @callee_void(i8* nonnull %0)
+ ret i32 -1
+}
+
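+; Same frame layout as pushpopret0; only the return value (2) differs.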
+define i32 @pushpopret2(i32 signext %size) {
+; RV32IZCMP-LABEL: pushpopret2:
+; RV32IZCMP: # %bb.0: # %entry
+; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-NEXT: addi a0, a0, 15
+; RV32IZCMP-NEXT: andi a0, a0, -16
+; RV32IZCMP-NEXT: sub a0, sp, a0
+; RV32IZCMP-NEXT: mv sp, a0
+; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: li a0, 2
+; RV32IZCMP-NEXT: addi sp, s0, -16
+; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: pushpopret2:
+; RV64IZCMP: # %bb.0: # %entry
+; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-NEXT: addi s0, sp, 16
+; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: srli a0, a0, 32
+; RV64IZCMP-NEXT: addi a0, a0, 15
+; RV64IZCMP-NEXT: andi a0, a0, -16
+; RV64IZCMP-NEXT: sub a0, sp, a0
+; RV64IZCMP-NEXT: mv sp, a0
+; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: li a0, 2
+; RV64IZCMP-NEXT: addi sp, s0, -16
+; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: pushpopret2:
+; RV32IZCMP-SR: # %bb.0: # %entry
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-SR-NEXT: addi a0, a0, 15
+; RV32IZCMP-SR-NEXT: andi a0, a0, -16
+; RV32IZCMP-SR-NEXT: sub a0, sp, a0
+; RV32IZCMP-SR-NEXT: mv sp, a0
+; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: li a0, 2
+; RV32IZCMP-SR-NEXT: addi sp, s0, -16
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV64IZCMP-SR-LABEL: pushpopret2:
+; RV64IZCMP-SR: # %bb.0: # %entry
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_1
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-SR-NEXT: addi s0, sp, 16
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: srli a0, a0, 32
+; RV64IZCMP-SR-NEXT: addi a0, a0, 15
+; RV64IZCMP-SR-NEXT: andi a0, a0, -16
+; RV64IZCMP-SR-NEXT: sub a0, sp, a0
+; RV64IZCMP-SR-NEXT: mv sp, a0
+; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: li a0, 2
+; RV64IZCMP-SR-NEXT: addi sp, s0, -16
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_1
+;
+; RV32I-LABEL: pushpopret2:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: .cfi_def_cfa s0, 0
+; RV32I-NEXT: addi a0, a0, 15
+; RV32I-NEXT: andi a0, a0, -16
+; RV32I-NEXT: sub a0, sp, a0
+; RV32I-NEXT: mv sp, a0
+; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: li a0, 2
+; RV32I-NEXT: addi sp, s0, -16
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: pushpopret2:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: addi s0, sp, 16
+; RV64I-NEXT: .cfi_def_cfa s0, 0
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a0, a0, 15
+; RV64I-NEXT: andi a0, a0, -16
+; RV64I-NEXT: sub a0, sp, a0
+; RV64I-NEXT: mv sp, a0
+; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: li a0, 2
+; RV64I-NEXT: addi sp, s0, -16
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+entry:
+ %0 = alloca i8, i32 %size, align 16
+ call void @callee_void(i8* nonnull %0)
+ ret i32 2
+}
+
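+; The frame must be torn down before control transfers, so cm.pop is emitted
+; ahead of the tail call rather than being merged with a return.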
+define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
+; RV32IZCMP-LABEL: tailcall:
+; RV32IZCMP: # %bb.0: # %entry
+; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-NEXT: addi a0, a0, 15
+; RV32IZCMP-NEXT: andi a0, a0, -16
+; RV32IZCMP-NEXT: sub a0, sp, a0
+; RV32IZCMP-NEXT: mv sp, a0
+; RV32IZCMP-NEXT: addi sp, s0, -16
+; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV32IZCMP-NEXT: tail callee@plt
+;
+; RV64IZCMP-LABEL: tailcall:
+; RV64IZCMP: # %bb.0: # %entry
+; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-NEXT: addi s0, sp, 16
+; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: srli a0, a0, 32
+; RV64IZCMP-NEXT: addi a0, a0, 15
+; RV64IZCMP-NEXT: andi a0, a0, -16
+; RV64IZCMP-NEXT: sub a0, sp, a0
+; RV64IZCMP-NEXT: mv sp, a0
+; RV64IZCMP-NEXT: addi sp, s0, -16
+; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
+; RV64IZCMP-NEXT: tail callee@plt
+;
+; RV32IZCMP-SR-LABEL: tailcall:
+; RV32IZCMP-SR: # %bb.0: # %entry
+; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-SR-NEXT: addi a0, a0, 15
+; RV32IZCMP-SR-NEXT: andi a0, a0, -16
+; RV32IZCMP-SR-NEXT: sub a0, sp, a0
+; RV32IZCMP-SR-NEXT: mv sp, a0
+; RV32IZCMP-SR-NEXT: addi sp, s0, -16
+; RV32IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
+; RV32IZCMP-SR-NEXT: tail callee@plt
+;
+; RV64IZCMP-SR-LABEL: tailcall:
+; RV64IZCMP-SR: # %bb.0: # %entry
+; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-SR-NEXT: addi s0, sp, 16
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: srli a0, a0, 32
+; RV64IZCMP-SR-NEXT: addi a0, a0, 15
+; RV64IZCMP-SR-NEXT: andi a0, a0, -16
+; RV64IZCMP-SR-NEXT: sub a0, sp, a0
+; RV64IZCMP-SR-NEXT: mv sp, a0
+; RV64IZCMP-SR-NEXT: addi sp, s0, -16
+; RV64IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
+; RV64IZCMP-SR-NEXT: tail callee@plt
+;
+; RV32I-LABEL: tailcall:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: .cfi_def_cfa s0, 0
+; RV32I-NEXT: addi a0, a0, 15
+; RV32I-NEXT: andi a0, a0, -16
+; RV32I-NEXT: sub a0, sp, a0
+; RV32I-NEXT: mv sp, a0
+; RV32I-NEXT: addi sp, s0, -16
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: tail callee@plt
+;
+; RV64I-LABEL: tailcall:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: addi s0, sp, 16
+; RV64I-NEXT: .cfi_def_cfa s0, 0
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a0, a0, 15
+; RV64I-NEXT: andi a0, a0, -16
+; RV64I-NEXT: sub a0, sp, a0
+; RV64I-NEXT: mv sp, a0
+; RV64I-NEXT: addi sp, s0, -16
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: tail callee@plt
+entry:
+ %0 = alloca i8, i32 %size, align 16
+ %1 = tail call i32 @callee(i8* nonnull %0)
+ ret i32 %1
+}
+
+@var = global [5 x i32] zeroinitializer
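+; The five loaded elements of @var, their addresses, and the alloca pointer
+; stay live across the call, so the register list grows to {ra, s0-s8}.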
+define i32 @nocompress(i32 signext %size) {
+; RV32IZCMP-LABEL: nocompress:
+; RV32IZCMP: # %bb.0: # %entry
+; RV32IZCMP-NEXT: cm.push {ra, s0-s8}, -48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 48
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-NEXT: .cfi_offset s1, -12
+; RV32IZCMP-NEXT: .cfi_offset s2, -16
+; RV32IZCMP-NEXT: .cfi_offset s3, -20
+; RV32IZCMP-NEXT: .cfi_offset s4, -24
+; RV32IZCMP-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-NEXT: .cfi_offset s6, -32
+; RV32IZCMP-NEXT: .cfi_offset s7, -36
+; RV32IZCMP-NEXT: .cfi_offset s8, -40
+; RV32IZCMP-NEXT: addi s0, sp, 48
+; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-NEXT: addi a0, a0, 15
+; RV32IZCMP-NEXT: andi a0, a0, -16
+; RV32IZCMP-NEXT: sub s2, sp, a0
+; RV32IZCMP-NEXT: mv sp, s2
+; RV32IZCMP-NEXT: lui s1, %hi(var)
+; RV32IZCMP-NEXT: lw s3, %lo(var)(s1)
+; RV32IZCMP-NEXT: lw s4, %lo(var+4)(s1)
+; RV32IZCMP-NEXT: lw s5, %lo(var+8)(s1)
+; RV32IZCMP-NEXT: lw s6, %lo(var+12)(s1)
+; RV32IZCMP-NEXT: addi s7, s1, %lo(var)
+; RV32IZCMP-NEXT: lw s8, 16(s7)
+; RV32IZCMP-NEXT: mv a0, s2
+; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: sw s8, 16(s7)
+; RV32IZCMP-NEXT: sw s6, %lo(var+12)(s1)
+; RV32IZCMP-NEXT: sw s5, %lo(var+8)(s1)
+; RV32IZCMP-NEXT: sw s4, %lo(var+4)(s1)
+; RV32IZCMP-NEXT: sw s3, %lo(var)(s1)
+; RV32IZCMP-NEXT: mv a0, s2
+; RV32IZCMP-NEXT: addi sp, s0, -48
+; RV32IZCMP-NEXT: cm.pop {ra, s0-s8}, 48
+; RV32IZCMP-NEXT: tail callee@plt
+;
+; RV64IZCMP-LABEL: nocompress:
+; RV64IZCMP: # %bb.0: # %entry
+; RV64IZCMP-NEXT: cm.push {ra, s0-s8}, -80
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 80
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-NEXT: .cfi_offset s1, -24
+; RV64IZCMP-NEXT: .cfi_offset s2, -32
+; RV64IZCMP-NEXT: .cfi_offset s3, -40
+; RV64IZCMP-NEXT: .cfi_offset s4, -48
+; RV64IZCMP-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-NEXT: .cfi_offset s6, -64
+; RV64IZCMP-NEXT: .cfi_offset s7, -72
+; RV64IZCMP-NEXT: .cfi_offset s8, -80
+; RV64IZCMP-NEXT: addi s0, sp, 80
+; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: srli a0, a0, 32
+; RV64IZCMP-NEXT: addi a0, a0, 15
+; RV64IZCMP-NEXT: andi a0, a0, -16
+; RV64IZCMP-NEXT: sub s2, sp, a0
+; RV64IZCMP-NEXT: mv sp, s2
+; RV64IZCMP-NEXT: lui s1, %hi(var)
+; RV64IZCMP-NEXT: lw s3, %lo(var)(s1)
+; RV64IZCMP-NEXT: lw s4, %lo(var+4)(s1)
+; RV64IZCMP-NEXT: lw s5, %lo(var+8)(s1)
+; RV64IZCMP-NEXT: lw s6, %lo(var+12)(s1)
+; RV64IZCMP-NEXT: addi s7, s1, %lo(var)
+; RV64IZCMP-NEXT: lw s8, 16(s7)
+; RV64IZCMP-NEXT: mv a0, s2
+; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: sw s8, 16(s7)
+; RV64IZCMP-NEXT: sw s6, %lo(var+12)(s1)
+; RV64IZCMP-NEXT: sw s5, %lo(var+8)(s1)
+; RV64IZCMP-NEXT: sw s4, %lo(var+4)(s1)
+; RV64IZCMP-NEXT: sw s3, %lo(var)(s1)
+; RV64IZCMP-NEXT: mv a0, s2
+; RV64IZCMP-NEXT: addi sp, s0, -80
+; RV64IZCMP-NEXT: cm.pop {ra, s0-s8}, 80
+; RV64IZCMP-NEXT: tail callee@plt
+;
+; RV32IZCMP-SR-LABEL: nocompress:
+; RV32IZCMP-SR: # %bb.0: # %entry
+; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -48
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-SR-NEXT: .cfi_offset s1, -12
+; RV32IZCMP-SR-NEXT: .cfi_offset s2, -16
+; RV32IZCMP-SR-NEXT: .cfi_offset s3, -20
+; RV32IZCMP-SR-NEXT: .cfi_offset s4, -24
+; RV32IZCMP-SR-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-SR-NEXT: .cfi_offset s6, -32
+; RV32IZCMP-SR-NEXT: .cfi_offset s7, -36
+; RV32IZCMP-SR-NEXT: .cfi_offset s8, -40
+; RV32IZCMP-SR-NEXT: addi s0, sp, 48
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV32IZCMP-SR-NEXT: addi a0, a0, 15
+; RV32IZCMP-SR-NEXT: andi a0, a0, -16
+; RV32IZCMP-SR-NEXT: sub s2, sp, a0
+; RV32IZCMP-SR-NEXT: mv sp, s2
+; RV32IZCMP-SR-NEXT: lui s1, %hi(var)
+; RV32IZCMP-SR-NEXT: lw s3, %lo(var)(s1)
+; RV32IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
+; RV32IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
+; RV32IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
+; RV32IZCMP-SR-NEXT: addi s7, s1, %lo(var)
+; RV32IZCMP-SR-NEXT: lw s8, 16(s7)
+; RV32IZCMP-SR-NEXT: mv a0, s2
+; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: sw s8, 16(s7)
+; RV32IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
+; RV32IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
+; RV32IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
+; RV32IZCMP-SR-NEXT: sw s3, %lo(var)(s1)
+; RV32IZCMP-SR-NEXT: mv a0, s2
+; RV32IZCMP-SR-NEXT: addi sp, s0, -48
+; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 48
+; RV32IZCMP-SR-NEXT: tail callee@plt
+;
+; RV64IZCMP-SR-LABEL: nocompress:
+; RV64IZCMP-SR: # %bb.0: # %entry
+; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -80
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-SR-NEXT: .cfi_offset s1, -24
+; RV64IZCMP-SR-NEXT: .cfi_offset s2, -32
+; RV64IZCMP-SR-NEXT: .cfi_offset s3, -40
+; RV64IZCMP-SR-NEXT: .cfi_offset s4, -48
+; RV64IZCMP-SR-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-SR-NEXT: .cfi_offset s6, -64
+; RV64IZCMP-SR-NEXT: .cfi_offset s7, -72
+; RV64IZCMP-SR-NEXT: .cfi_offset s8, -80
+; RV64IZCMP-SR-NEXT: addi s0, sp, 80
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: srli a0, a0, 32
+; RV64IZCMP-SR-NEXT: addi a0, a0, 15
+; RV64IZCMP-SR-NEXT: andi a0, a0, -16
+; RV64IZCMP-SR-NEXT: sub s2, sp, a0
+; RV64IZCMP-SR-NEXT: mv sp, s2
+; RV64IZCMP-SR-NEXT: lui s1, %hi(var)
+; RV64IZCMP-SR-NEXT: lw s3, %lo(var)(s1)
+; RV64IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
+; RV64IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
+; RV64IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
+; RV64IZCMP-SR-NEXT: addi s7, s1, %lo(var)
+; RV64IZCMP-SR-NEXT: lw s8, 16(s7)
+; RV64IZCMP-SR-NEXT: mv a0, s2
+; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: sw s8, 16(s7)
+; RV64IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
+; RV64IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
+; RV64IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
+; RV64IZCMP-SR-NEXT: sw s3, %lo(var)(s1)
+; RV64IZCMP-SR-NEXT: mv a0, s2
+; RV64IZCMP-SR-NEXT: addi sp, s0, -80
+; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 80
+; RV64IZCMP-SR-NEXT: tail callee@plt
+;
+; RV32I-LABEL: nocompress:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -48
+; RV32I-NEXT: .cfi_def_cfa_offset 48
+; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: .cfi_offset s4, -24
+; RV32I-NEXT: .cfi_offset s5, -28
+; RV32I-NEXT: .cfi_offset s6, -32
+; RV32I-NEXT: .cfi_offset s7, -36
+; RV32I-NEXT: .cfi_offset s8, -40
+; RV32I-NEXT: addi s0, sp, 48
+; RV32I-NEXT: .cfi_def_cfa s0, 0
+; RV32I-NEXT: addi a0, a0, 15
+; RV32I-NEXT: andi a0, a0, -16
+; RV32I-NEXT: sub s1, sp, a0
+; RV32I-NEXT: mv sp, s1
+; RV32I-NEXT: lui s2, %hi(var)
+; RV32I-NEXT: lw s3, %lo(var)(s2)
+; RV32I-NEXT: lw s4, %lo(var+4)(s2)
+; RV32I-NEXT: lw s5, %lo(var+8)(s2)
+; RV32I-NEXT: lw s6, %lo(var+12)(s2)
+; RV32I-NEXT: addi s7, s2, %lo(var)
+; RV32I-NEXT: lw s8, 16(s7)
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: sw s8, 16(s7)
+; RV32I-NEXT: sw s6, %lo(var+12)(s2)
+; RV32I-NEXT: sw s5, %lo(var+8)(s2)
+; RV32I-NEXT: sw s4, %lo(var+4)(s2)
+; RV32I-NEXT: sw s3, %lo(var)(s2)
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: addi sp, s0, -48
+; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: tail callee@plt
+;
+; RV64I-LABEL: nocompress:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -80
+; RV64I-NEXT: .cfi_def_cfa_offset 80
+; RV64I-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s5, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s6, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s7, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s8, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: .cfi_offset s3, -40
+; RV64I-NEXT: .cfi_offset s4, -48
+; RV64I-NEXT: .cfi_offset s5, -56
+; RV64I-NEXT: .cfi_offset s6, -64
+; RV64I-NEXT: .cfi_offset s7, -72
+; RV64I-NEXT: .cfi_offset s8, -80
+; RV64I-NEXT: addi s0, sp, 80
+; RV64I-NEXT: .cfi_def_cfa s0, 0
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a0, a0, 15
+; RV64I-NEXT: andi a0, a0, -16
+; RV64I-NEXT: sub s1, sp, a0
+; RV64I-NEXT: mv sp, s1
+; RV64I-NEXT: lui s2, %hi(var)
+; RV64I-NEXT: lw s3, %lo(var)(s2)
+; RV64I-NEXT: lw s4, %lo(var+4)(s2)
+; RV64I-NEXT: lw s5, %lo(var+8)(s2)
+; RV64I-NEXT: lw s6, %lo(var+12)(s2)
+; RV64I-NEXT: addi s7, s2, %lo(var)
+; RV64I-NEXT: lw s8, 16(s7)
+; RV64I-NEXT: mv a0, s1
+; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: sw s8, 16(s7)
+; RV64I-NEXT: sw s6, %lo(var+12)(s2)
+; RV64I-NEXT: sw s5, %lo(var+8)(s2)
+; RV64I-NEXT: sw s4, %lo(var+4)(s2)
+; RV64I-NEXT: sw s3, %lo(var)(s2)
+; RV64I-NEXT: mv a0, s1
+; RV64I-NEXT: addi sp, s0, -80
+; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s5, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s6, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s7, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s8, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 80
+; RV64I-NEXT: tail callee@plt
+entry:
+ %0 = alloca i8, i32 %size, align 16
+ %val = load [5 x i32], [5 x i32]* @var
+ call void @callee_void(i8* nonnull %0)
+ store volatile [5 x i32] %val, [5 x i32]* @var
+ %1 = tail call i32 @callee(i8* nonnull %0)
+ ret i32 %1
+}
+
+; Check that functions with varargs use neither Zcmp push/pop nor save/restore code
+
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+
+define i32 @varargs(ptr %fmt, ...) nounwind {
+; RV32IZCMP-LABEL: varargs:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: mv a0, a1
+; RV32IZCMP-NEXT: sw a7, 44(sp)
+; RV32IZCMP-NEXT: sw a6, 40(sp)
+; RV32IZCMP-NEXT: sw a5, 36(sp)
+; RV32IZCMP-NEXT: sw a4, 32(sp)
+; RV32IZCMP-NEXT: sw a3, 28(sp)
+; RV32IZCMP-NEXT: sw a2, 24(sp)
+; RV32IZCMP-NEXT: sw a1, 20(sp)
+; RV32IZCMP-NEXT: addi a1, sp, 24
+; RV32IZCMP-NEXT: sw a1, 12(sp)
+; RV32IZCMP-NEXT: addi sp, sp, 48
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: varargs:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: addi sp, sp, -80
+; RV64IZCMP-NEXT: sd a7, 72(sp)
+; RV64IZCMP-NEXT: sd a6, 64(sp)
+; RV64IZCMP-NEXT: sd a5, 56(sp)
+; RV64IZCMP-NEXT: sd a4, 48(sp)
+; RV64IZCMP-NEXT: sd a3, 40(sp)
+; RV64IZCMP-NEXT: sd a2, 32(sp)
+; RV64IZCMP-NEXT: sd a1, 24(sp)
+; RV64IZCMP-NEXT: addi a0, sp, 24
+; RV64IZCMP-NEXT: sd a0, 8(sp)
+; RV64IZCMP-NEXT: lwu a0, 12(sp)
+; RV64IZCMP-NEXT: lwu a1, 8(sp)
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: or a0, a0, a1
+; RV64IZCMP-NEXT: addi a1, a0, 4
+; RV64IZCMP-NEXT: sw a1, 8(sp)
+; RV64IZCMP-NEXT: srli a1, a1, 32
+; RV64IZCMP-NEXT: sw a1, 12(sp)
+; RV64IZCMP-NEXT: lw a0, 0(a0)
+; RV64IZCMP-NEXT: addi sp, sp, 80
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: varargs:
+; RV32IZCMP-SR: # %bb.0:
+; RV32IZCMP-SR-NEXT: addi sp, sp, -48
+; RV32IZCMP-SR-NEXT: mv a0, a1
+; RV32IZCMP-SR-NEXT: sw a7, 44(sp)
+; RV32IZCMP-SR-NEXT: sw a6, 40(sp)
+; RV32IZCMP-SR-NEXT: sw a5, 36(sp)
+; RV32IZCMP-SR-NEXT: sw a4, 32(sp)
+; RV32IZCMP-SR-NEXT: sw a3, 28(sp)
+; RV32IZCMP-SR-NEXT: sw a2, 24(sp)
+; RV32IZCMP-SR-NEXT: sw a1, 20(sp)
+; RV32IZCMP-SR-NEXT: addi a1, sp, 24
+; RV32IZCMP-SR-NEXT: sw a1, 12(sp)
+; RV32IZCMP-SR-NEXT: addi sp, sp, 48
+; RV32IZCMP-SR-NEXT: ret
+;
+; RV64IZCMP-SR-LABEL: varargs:
+; RV64IZCMP-SR: # %bb.0:
+; RV64IZCMP-SR-NEXT: addi sp, sp, -80
+; RV64IZCMP-SR-NEXT: sd a7, 72(sp)
+; RV64IZCMP-SR-NEXT: sd a6, 64(sp)
+; RV64IZCMP-SR-NEXT: sd a5, 56(sp)
+; RV64IZCMP-SR-NEXT: sd a4, 48(sp)
+; RV64IZCMP-SR-NEXT: sd a3, 40(sp)
+; RV64IZCMP-SR-NEXT: sd a2, 32(sp)
+; RV64IZCMP-SR-NEXT: sd a1, 24(sp)
+; RV64IZCMP-SR-NEXT: addi a0, sp, 24
+; RV64IZCMP-SR-NEXT: sd a0, 8(sp)
+; RV64IZCMP-SR-NEXT: lwu a0, 12(sp)
+; RV64IZCMP-SR-NEXT: lwu a1, 8(sp)
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: or a0, a0, a1
+; RV64IZCMP-SR-NEXT: addi a1, a0, 4
+; RV64IZCMP-SR-NEXT: sw a1, 8(sp)
+; RV64IZCMP-SR-NEXT: srli a1, a1, 32
+; RV64IZCMP-SR-NEXT: sw a1, 12(sp)
+; RV64IZCMP-SR-NEXT: lw a0, 0(a0)
+; RV64IZCMP-SR-NEXT: addi sp, sp, 80
+; RV64IZCMP-SR-NEXT: ret
+;
+; RV32I-LABEL: varargs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -48
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: sw a7, 44(sp)
+; RV32I-NEXT: sw a6, 40(sp)
+; RV32I-NEXT: sw a5, 36(sp)
+; RV32I-NEXT: sw a4, 32(sp)
+; RV32I-NEXT: sw a3, 28(sp)
+; RV32I-NEXT: sw a2, 24(sp)
+; RV32I-NEXT: sw a1, 20(sp)
+; RV32I-NEXT: addi a1, sp, 24
+; RV32I-NEXT: sw a1, 12(sp)
+; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: varargs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -80
+; RV64I-NEXT: sd a7, 72(sp)
+; RV64I-NEXT: sd a6, 64(sp)
+; RV64I-NEXT: sd a5, 56(sp)
+; RV64I-NEXT: sd a4, 48(sp)
+; RV64I-NEXT: sd a3, 40(sp)
+; RV64I-NEXT: sd a2, 32(sp)
+; RV64I-NEXT: sd a1, 24(sp)
+; RV64I-NEXT: addi a0, sp, 24
+; RV64I-NEXT: sd a0, 8(sp)
+; RV64I-NEXT: lwu a0, 12(sp)
+; RV64I-NEXT: lwu a1, 8(sp)
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: addi a1, a0, 4
+; RV64I-NEXT: sw a1, 8(sp)
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: sw a1, 12(sp)
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: addi sp, sp, 80
+; RV64I-NEXT: ret
+ %va = alloca ptr, align 4
+ call void @llvm.va_start(ptr %va)
+ %argp.cur = load ptr, ptr %va, align 4
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+ store ptr %argp.next, ptr %va, align 4
+ %1 = load i32, ptr %argp.cur, align 4
+ call void @llvm.va_end(ptr %va)
+ ret i32 %1
+}
+
+@var0 = global [18 x i32] zeroinitializer
+
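+; No call is made, but enough values are live at once to clobber s0-s4, so
+; push/pop is still used (the Zcmp register list always includes ra).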
+define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
+; RV32IZCMP-LABEL: many_args:
+; RV32IZCMP: # %bb.0: # %entry
+; RV32IZCMP-NEXT: cm.push {ra, s0-s4}, -32
+; RV32IZCMP-NEXT: lui a0, %hi(var0)
+; RV32IZCMP-NEXT: lw a6, %lo(var0)(a0)
+; RV32IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
+; RV32IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
+; RV32IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
+; RV32IZCMP-NEXT: addi a5, a0, %lo(var0)
+; RV32IZCMP-NEXT: lw t2, 16(a5)
+; RV32IZCMP-NEXT: lw t3, 20(a5)
+; RV32IZCMP-NEXT: lw t4, 24(a5)
+; RV32IZCMP-NEXT: lw t5, 28(a5)
+; RV32IZCMP-NEXT: lw t6, 32(a5)
+; RV32IZCMP-NEXT: lw s2, 36(a5)
+; RV32IZCMP-NEXT: lw s3, 40(a5)
+; RV32IZCMP-NEXT: lw s4, 44(a5)
+; RV32IZCMP-NEXT: lw a1, 48(a5)
+; RV32IZCMP-NEXT: lw s0, 52(a5)
+; RV32IZCMP-NEXT: lw s1, 68(a5)
+; RV32IZCMP-NEXT: lw a2, 64(a5)
+; RV32IZCMP-NEXT: lw a3, 60(a5)
+; RV32IZCMP-NEXT: lw a4, 56(a5)
+; RV32IZCMP-NEXT: sw s1, 68(a5)
+; RV32IZCMP-NEXT: sw a2, 64(a5)
+; RV32IZCMP-NEXT: sw a3, 60(a5)
+; RV32IZCMP-NEXT: sw a4, 56(a5)
+; RV32IZCMP-NEXT: sw s0, 52(a5)
+; RV32IZCMP-NEXT: sw a1, 48(a5)
+; RV32IZCMP-NEXT: sw s4, 44(a5)
+; RV32IZCMP-NEXT: sw s3, 40(a5)
+; RV32IZCMP-NEXT: sw s2, 36(a5)
+; RV32IZCMP-NEXT: sw t6, 32(a5)
+; RV32IZCMP-NEXT: sw t5, 28(a5)
+; RV32IZCMP-NEXT: sw t4, 24(a5)
+; RV32IZCMP-NEXT: sw t3, 20(a5)
+; RV32IZCMP-NEXT: sw t2, 16(a5)
+; RV32IZCMP-NEXT: sw t1, %lo(var0+12)(a0)
+; RV32IZCMP-NEXT: sw t0, %lo(var0+8)(a0)
+; RV32IZCMP-NEXT: sw a7, %lo(var0+4)(a0)
+; RV32IZCMP-NEXT: sw a6, %lo(var0)(a0)
+; RV32IZCMP-NEXT: cm.pop {ra, s0-s4}, 32
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: many_args:
+; RV64IZCMP: # %bb.0: # %entry
+; RV64IZCMP-NEXT: cm.push {ra, s0-s4}, -48
+; RV64IZCMP-NEXT: lui a0, %hi(var0)
+; RV64IZCMP-NEXT: lw a6, %lo(var0)(a0)
+; RV64IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
+; RV64IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
+; RV64IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
+; RV64IZCMP-NEXT: addi a5, a0, %lo(var0)
+; RV64IZCMP-NEXT: lw t2, 16(a5)
+; RV64IZCMP-NEXT: lw t3, 20(a5)
+; RV64IZCMP-NEXT: lw t4, 24(a5)
+; RV64IZCMP-NEXT: lw t5, 28(a5)
+; RV64IZCMP-NEXT: lw t6, 32(a5)
+; RV64IZCMP-NEXT: lw s2, 36(a5)
+; RV64IZCMP-NEXT: lw s3, 40(a5)
+; RV64IZCMP-NEXT: lw s4, 44(a5)
+; RV64IZCMP-NEXT: lw a1, 48(a5)
+; RV64IZCMP-NEXT: lw s0, 52(a5)
+; RV64IZCMP-NEXT: lw s1, 68(a5)
+; RV64IZCMP-NEXT: lw a2, 64(a5)
+; RV64IZCMP-NEXT: lw a3, 60(a5)
+; RV64IZCMP-NEXT: lw a4, 56(a5)
+; RV64IZCMP-NEXT: sw s1, 68(a5)
+; RV64IZCMP-NEXT: sw a2, 64(a5)
+; RV64IZCMP-NEXT: sw a3, 60(a5)
+; RV64IZCMP-NEXT: sw a4, 56(a5)
+; RV64IZCMP-NEXT: sw s0, 52(a5)
+; RV64IZCMP-NEXT: sw a1, 48(a5)
+; RV64IZCMP-NEXT: sw s4, 44(a5)
+; RV64IZCMP-NEXT: sw s3, 40(a5)
+; RV64IZCMP-NEXT: sw s2, 36(a5)
+; RV64IZCMP-NEXT: sw t6, 32(a5)
+; RV64IZCMP-NEXT: sw t5, 28(a5)
+; RV64IZCMP-NEXT: sw t4, 24(a5)
+; RV64IZCMP-NEXT: sw t3, 20(a5)
+; RV64IZCMP-NEXT: sw t2, 16(a5)
+; RV64IZCMP-NEXT: sw t1, %lo(var0+12)(a0)
+; RV64IZCMP-NEXT: sw t0, %lo(var0+8)(a0)
+; RV64IZCMP-NEXT: sw a7, %lo(var0+4)(a0)
+; RV64IZCMP-NEXT: sw a6, %lo(var0)(a0)
+; RV64IZCMP-NEXT: cm.pop {ra, s0-s4}, 48
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: many_args:
+; RV32IZCMP-SR: # %bb.0: # %entry
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_5
+; RV32IZCMP-SR-NEXT: lui a0, %hi(var0)
+; RV32IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
+; RV32IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
+; RV32IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
+; RV32IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
+; RV32IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
+; RV32IZCMP-SR-NEXT: lw t2, 16(a5)
+; RV32IZCMP-SR-NEXT: lw t3, 20(a5)
+; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
+; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
+; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
+; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
+; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
+; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
+; RV32IZCMP-SR-NEXT: lw a1, 48(a5)
+; RV32IZCMP-SR-NEXT: lw s0, 52(a5)
+; RV32IZCMP-SR-NEXT: lw s1, 68(a5)
+; RV32IZCMP-SR-NEXT: lw a2, 64(a5)
+; RV32IZCMP-SR-NEXT: lw a3, 60(a5)
+; RV32IZCMP-SR-NEXT: lw a4, 56(a5)
+; RV32IZCMP-SR-NEXT: sw s1, 68(a5)
+; RV32IZCMP-SR-NEXT: sw a2, 64(a5)
+; RV32IZCMP-SR-NEXT: sw a3, 60(a5)
+; RV32IZCMP-SR-NEXT: sw a4, 56(a5)
+; RV32IZCMP-SR-NEXT: sw s0, 52(a5)
+; RV32IZCMP-SR-NEXT: sw a1, 48(a5)
+; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
+; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
+; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
+; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
+; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
+; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
+; RV32IZCMP-SR-NEXT: sw t3, 20(a5)
+; RV32IZCMP-SR-NEXT: sw t2, 16(a5)
+; RV32IZCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
+; RV32IZCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
+; RV32IZCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
+; RV32IZCMP-SR-NEXT: sw a6, %lo(var0)(a0)
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_5
+;
+; RV64IZCMP-SR-LABEL: many_args:
+; RV64IZCMP-SR: # %bb.0: # %entry
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_5
+; RV64IZCMP-SR-NEXT: lui a0, %hi(var0)
+; RV64IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
+; RV64IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
+; RV64IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
+; RV64IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
+; RV64IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
+; RV64IZCMP-SR-NEXT: lw t2, 16(a5)
+; RV64IZCMP-SR-NEXT: lw t3, 20(a5)
+; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
+; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
+; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
+; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
+; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
+; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
+; RV64IZCMP-SR-NEXT: lw a1, 48(a5)
+; RV64IZCMP-SR-NEXT: lw s0, 52(a5)
+; RV64IZCMP-SR-NEXT: lw s1, 68(a5)
+; RV64IZCMP-SR-NEXT: lw a2, 64(a5)
+; RV64IZCMP-SR-NEXT: lw a3, 60(a5)
+; RV64IZCMP-SR-NEXT: lw a4, 56(a5)
+; RV64IZCMP-SR-NEXT: sw s1, 68(a5)
+; RV64IZCMP-SR-NEXT: sw a2, 64(a5)
+; RV64IZCMP-SR-NEXT: sw a3, 60(a5)
+; RV64IZCMP-SR-NEXT: sw a4, 56(a5)
+; RV64IZCMP-SR-NEXT: sw s0, 52(a5)
+; RV64IZCMP-SR-NEXT: sw a1, 48(a5)
+; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
+; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
+; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
+; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
+; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
+; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
+; RV64IZCMP-SR-NEXT: sw t3, 20(a5)
+; RV64IZCMP-SR-NEXT: sw t2, 16(a5)
+; RV64IZCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
+; RV64IZCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
+; RV64IZCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
+; RV64IZCMP-SR-NEXT: sw a6, %lo(var0)(a0)
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_5
+;
+; RV32I-LABEL: many_args:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a0, %hi(var0)
+; RV32I-NEXT: lw a1, %lo(var0)(a0)
+; RV32I-NEXT: lw a2, %lo(var0+4)(a0)
+; RV32I-NEXT: lw a3, %lo(var0+8)(a0)
+; RV32I-NEXT: lw a4, %lo(var0+12)(a0)
+; RV32I-NEXT: addi a5, a0, %lo(var0)
+; RV32I-NEXT: lw a6, 16(a5)
+; RV32I-NEXT: lw a7, 20(a5)
+; RV32I-NEXT: lw t0, 24(a5)
+; RV32I-NEXT: lw t1, 28(a5)
+; RV32I-NEXT: lw t2, 32(a5)
+; RV32I-NEXT: lw t3, 36(a5)
+; RV32I-NEXT: lw t4, 40(a5)
+; RV32I-NEXT: lw t5, 44(a5)
+; RV32I-NEXT: lw t6, 48(a5)
+; RV32I-NEXT: lw s0, 52(a5)
+; RV32I-NEXT: lw s1, 68(a5)
+; RV32I-NEXT: lw s2, 64(a5)
+; RV32I-NEXT: lw s3, 60(a5)
+; RV32I-NEXT: lw s4, 56(a5)
+; RV32I-NEXT: sw s1, 68(a5)
+; RV32I-NEXT: sw s2, 64(a5)
+; RV32I-NEXT: sw s3, 60(a5)
+; RV32I-NEXT: sw s4, 56(a5)
+; RV32I-NEXT: sw s0, 52(a5)
+; RV32I-NEXT: sw t6, 48(a5)
+; RV32I-NEXT: sw t5, 44(a5)
+; RV32I-NEXT: sw t4, 40(a5)
+; RV32I-NEXT: sw t3, 36(a5)
+; RV32I-NEXT: sw t2, 32(a5)
+; RV32I-NEXT: sw t1, 28(a5)
+; RV32I-NEXT: sw t0, 24(a5)
+; RV32I-NEXT: sw a7, 20(a5)
+; RV32I-NEXT: sw a6, 16(a5)
+; RV32I-NEXT: sw a4, %lo(var0+12)(a0)
+; RV32I-NEXT: sw a3, %lo(var0+8)(a0)
+; RV32I-NEXT: sw a2, %lo(var0+4)(a0)
+; RV32I-NEXT: sw a1, %lo(var0)(a0)
+; RV32I-NEXT: lw s0, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: many_args:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: sd s0, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a0, %hi(var0)
+; RV64I-NEXT: lw a1, %lo(var0)(a0)
+; RV64I-NEXT: lw a2, %lo(var0+4)(a0)
+; RV64I-NEXT: lw a3, %lo(var0+8)(a0)
+; RV64I-NEXT: lw a4, %lo(var0+12)(a0)
+; RV64I-NEXT: addi a5, a0, %lo(var0)
+; RV64I-NEXT: lw a6, 16(a5)
+; RV64I-NEXT: lw a7, 20(a5)
+; RV64I-NEXT: lw t0, 24(a5)
+; RV64I-NEXT: lw t1, 28(a5)
+; RV64I-NEXT: lw t2, 32(a5)
+; RV64I-NEXT: lw t3, 36(a5)
+; RV64I-NEXT: lw t4, 40(a5)
+; RV64I-NEXT: lw t5, 44(a5)
+; RV64I-NEXT: lw t6, 48(a5)
+; RV64I-NEXT: lw s0, 52(a5)
+; RV64I-NEXT: lw s1, 68(a5)
+; RV64I-NEXT: lw s2, 64(a5)
+; RV64I-NEXT: lw s3, 60(a5)
+; RV64I-NEXT: lw s4, 56(a5)
+; RV64I-NEXT: sw s1, 68(a5)
+; RV64I-NEXT: sw s2, 64(a5)
+; RV64I-NEXT: sw s3, 60(a5)
+; RV64I-NEXT: sw s4, 56(a5)
+; RV64I-NEXT: sw s0, 52(a5)
+; RV64I-NEXT: sw t6, 48(a5)
+; RV64I-NEXT: sw t5, 44(a5)
+; RV64I-NEXT: sw t4, 40(a5)
+; RV64I-NEXT: sw t3, 36(a5)
+; RV64I-NEXT: sw t2, 32(a5)
+; RV64I-NEXT: sw t1, 28(a5)
+; RV64I-NEXT: sw t0, 24(a5)
+; RV64I-NEXT: sw a7, 20(a5)
+; RV64I-NEXT: sw a6, 16(a5)
+; RV64I-NEXT: sw a4, %lo(var0+12)(a0)
+; RV64I-NEXT: sw a3, %lo(var0+8)(a0)
+; RV64I-NEXT: sw a2, %lo(var0+4)(a0)
+; RV64I-NEXT: sw a1, %lo(var0)(a0)
+; RV64I-NEXT: ld s0, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+entry:
+ %val = load [18 x i32], ptr @var0
+ store volatile [18 x i32] %val, ptr @var0
+ ret void
+}
+
+; Check that dynamic allocation calculations remain correct
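+; With variable-sized allocas, the epilogues below first restore sp from the
+; frame pointer (addi sp, s0, -<static frame size>) so that the immediate on
+; cm.pop / __riscv_restore matches the one used at function entry.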
+
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
+declare void @notdead(ptr)
+
+define void @alloca(i32 %n) nounwind {
+; RV32IZCMP-LABEL: alloca:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -16
+; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: mv s1, sp
+; RV32IZCMP-NEXT: addi a0, a0, 15
+; RV32IZCMP-NEXT: andi a0, a0, -16
+; RV32IZCMP-NEXT: sub a0, sp, a0
+; RV32IZCMP-NEXT: mv sp, a0
+; RV32IZCMP-NEXT:    call notdead@plt
+; RV32IZCMP-NEXT: mv sp, s1
+; RV32IZCMP-NEXT: addi sp, s0, -16
+; RV32IZCMP-NEXT: cm.pop {ra, s0-s1}, 16
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: alloca:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -32
+; RV64IZCMP-NEXT: addi s0, sp, 32
+; RV64IZCMP-NEXT: mv s1, sp
+; RV64IZCMP-NEXT: slli a0, a0, 32
+; RV64IZCMP-NEXT: srli a0, a0, 32
+; RV64IZCMP-NEXT: addi a0, a0, 15
+; RV64IZCMP-NEXT: andi a0, a0, -16
+; RV64IZCMP-NEXT: sub a0, sp, a0
+; RV64IZCMP-NEXT: mv sp, a0
+; RV64IZCMP-NEXT:    call notdead@plt
+; RV64IZCMP-NEXT: mv sp, s1
+; RV64IZCMP-NEXT: addi sp, s0, -32
+; RV64IZCMP-NEXT: cm.pop {ra, s0-s1}, 32
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: alloca:
+; RV32IZCMP-SR: # %bb.0:
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_2
+; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: mv s1, sp
+; RV32IZCMP-SR-NEXT: addi a0, a0, 15
+; RV32IZCMP-SR-NEXT: andi a0, a0, -16
+; RV32IZCMP-SR-NEXT: sub a0, sp, a0
+; RV32IZCMP-SR-NEXT: mv sp, a0
+; RV32IZCMP-SR-NEXT:    call notdead@plt
+; RV32IZCMP-SR-NEXT: mv sp, s1
+; RV32IZCMP-SR-NEXT: addi sp, s0, -16
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_2
+;
+; RV64IZCMP-SR-LABEL: alloca:
+; RV64IZCMP-SR: # %bb.0:
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_2
+; RV64IZCMP-SR-NEXT: addi s0, sp, 32
+; RV64IZCMP-SR-NEXT: mv s1, sp
+; RV64IZCMP-SR-NEXT: slli a0, a0, 32
+; RV64IZCMP-SR-NEXT: srli a0, a0, 32
+; RV64IZCMP-SR-NEXT: addi a0, a0, 15
+; RV64IZCMP-SR-NEXT: andi a0, a0, -16
+; RV64IZCMP-SR-NEXT: sub a0, sp, a0
+; RV64IZCMP-SR-NEXT: mv sp, a0
+; RV64IZCMP-SR-NEXT:    call notdead@plt
+; RV64IZCMP-SR-NEXT: mv sp, s1
+; RV64IZCMP-SR-NEXT: addi sp, s0, -32
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_2
+;
+; RV32I-LABEL: alloca:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: mv s1, sp
+; RV32I-NEXT: addi a0, a0, 15
+; RV32I-NEXT: andi a0, a0, -16
+; RV32I-NEXT: sub a0, sp, a0
+; RV32I-NEXT: mv sp, a0
+; RV32I-NEXT:    call notdead@plt
+; RV32I-NEXT: mv sp, s1
+; RV32I-NEXT: addi sp, s0, -16
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: alloca:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi s0, sp, 32
+; RV64I-NEXT: mv s1, sp
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a0, a0, 15
+; RV64I-NEXT: andi a0, a0, -16
+; RV64I-NEXT: sub a0, sp, a0
+; RV64I-NEXT: mv sp, a0
+; RV64I-NEXT:    call notdead@plt
+; RV64I-NEXT: mv sp, s1
+; RV64I-NEXT: addi sp, s0, -32
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+ %sp = call ptr @llvm.stacksave()
+ %addr = alloca i8, i32 %n
+ call void @notdead(ptr %addr)
+ call void @llvm.stackrestore(ptr %sp)
+ ret void
+}
+
+declare i32 @foo_test_irq(...)
+@var_test_irq = global [32 x i32] zeroinitializer
+
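+; For a function with the user interrupt attribute, the normally caller-saved
+; t- and a-registers must also be preserved. cm.push can only encode ra and
+; s-registers, so the remaining spills use a separate sp adjustment below.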
+define void @foo_with_irq() nounwind "interrupt"="user" {
+; RV32IZCMP-LABEL: foo_with_irq:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: cm.push {ra}, -64
+; RV32IZCMP-NEXT: addi sp, sp, -16
+; RV32IZCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT:    call foo_test_irq@plt
+; RV32IZCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: addi sp, sp, 16
+; RV32IZCMP-NEXT: cm.pop {ra}, 64
+; RV32IZCMP-NEXT: mret
+;
+; RV64IZCMP-LABEL: foo_with_irq:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: cm.push {ra}, -64
+; RV64IZCMP-NEXT: addi sp, sp, -80
+; RV64IZCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT:    call foo_test_irq@plt
+; RV64IZCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: addi sp, sp, 80
+; RV64IZCMP-NEXT: cm.pop {ra}, 64
+; RV64IZCMP-NEXT: mret
+;
+; RV32IZCMP-SR-LABEL: foo_with_irq:
+; RV32IZCMP-SR: # %bb.0:
+; RV32IZCMP-SR-NEXT: cm.push {ra}, -64
+; RV32IZCMP-SR-NEXT: addi sp, sp, -16
+; RV32IZCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT:    call foo_test_irq@plt
+; RV32IZCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: addi sp, sp, 16
+; RV32IZCMP-SR-NEXT: cm.pop {ra}, 64
+; RV32IZCMP-SR-NEXT: mret
+;
+; RV64IZCMP-SR-LABEL: foo_with_irq:
+; RV64IZCMP-SR: # %bb.0:
+; RV64IZCMP-SR-NEXT: cm.push {ra}, -64
+; RV64IZCMP-SR-NEXT: addi sp, sp, -80
+; RV64IZCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT:    call foo_test_irq@plt
+; RV64IZCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: addi sp, sp, 80
+; RV64IZCMP-SR-NEXT: cm.pop {ra}, 64
+; RV64IZCMP-SR-NEXT: mret
+;
+; RV32I-LABEL: foo_with_irq:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call foo_test_irq@plt
+; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: mret
+;
+; RV64I-LABEL: foo_with_irq:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -128
+; RV64I-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call foo_test_irq@plt
+; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 128
+; RV64I-NEXT: mret
+ %call = call i32 bitcast (i32 (...)* @foo_test_irq to i32 ()*)()
+ ret void
+}
+
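+; Here every GPR is clobbered by the load/store sequence, so the Zcmp
+; configurations combine cm.push {ra, s0-s11} for the s-registers with
+; individual spills of the t- and a-registers required by the interrupt
+; attribute.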
+define void @callee_with_irq() nounwind "interrupt"="user" {
+; RV32IZCMP-LABEL: callee_with_irq:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -112
+; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV32IZCMP-NEXT: lw a0, 16(a5)
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 20(a5)
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw t4, 24(a5)
+; RV32IZCMP-NEXT: lw t5, 28(a5)
+; RV32IZCMP-NEXT: lw t6, 32(a5)
+; RV32IZCMP-NEXT: lw s2, 36(a5)
+; RV32IZCMP-NEXT: lw s3, 40(a5)
+; RV32IZCMP-NEXT: lw s4, 44(a5)
+; RV32IZCMP-NEXT: lw s5, 48(a5)
+; RV32IZCMP-NEXT: lw s6, 52(a5)
+; RV32IZCMP-NEXT: lw s7, 56(a5)
+; RV32IZCMP-NEXT: lw s8, 60(a5)
+; RV32IZCMP-NEXT: lw s9, 64(a5)
+; RV32IZCMP-NEXT: lw s10, 68(a5)
+; RV32IZCMP-NEXT: lw s11, 72(a5)
+; RV32IZCMP-NEXT: lw ra, 76(a5)
+; RV32IZCMP-NEXT: lw s1, 80(a5)
+; RV32IZCMP-NEXT: lw t3, 84(a5)
+; RV32IZCMP-NEXT: lw t2, 88(a5)
+; RV32IZCMP-NEXT: lw t1, 92(a5)
+; RV32IZCMP-NEXT: lw t0, 96(a5)
+; RV32IZCMP-NEXT: lw s0, 100(a5)
+; RV32IZCMP-NEXT: lw a6, 104(a5)
+; RV32IZCMP-NEXT: lw a4, 108(a5)
+; RV32IZCMP-NEXT: lw a0, 124(a5)
+; RV32IZCMP-NEXT: lw a1, 120(a5)
+; RV32IZCMP-NEXT: lw a2, 116(a5)
+; RV32IZCMP-NEXT: lw a3, 112(a5)
+; RV32IZCMP-NEXT: sw a0, 124(a5)
+; RV32IZCMP-NEXT: sw a1, 120(a5)
+; RV32IZCMP-NEXT: sw a2, 116(a5)
+; RV32IZCMP-NEXT: sw a3, 112(a5)
+; RV32IZCMP-NEXT: sw a4, 108(a5)
+; RV32IZCMP-NEXT: sw a6, 104(a5)
+; RV32IZCMP-NEXT: sw s0, 100(a5)
+; RV32IZCMP-NEXT: sw t0, 96(a5)
+; RV32IZCMP-NEXT: sw t1, 92(a5)
+; RV32IZCMP-NEXT: sw t2, 88(a5)
+; RV32IZCMP-NEXT: sw t3, 84(a5)
+; RV32IZCMP-NEXT: sw s1, 80(a5)
+; RV32IZCMP-NEXT: sw ra, 76(a5)
+; RV32IZCMP-NEXT: sw s11, 72(a5)
+; RV32IZCMP-NEXT: sw s10, 68(a5)
+; RV32IZCMP-NEXT: sw s9, 64(a5)
+; RV32IZCMP-NEXT: sw s8, 60(a5)
+; RV32IZCMP-NEXT: sw s7, 56(a5)
+; RV32IZCMP-NEXT: sw s6, 52(a5)
+; RV32IZCMP-NEXT: sw s5, 48(a5)
+; RV32IZCMP-NEXT: sw s4, 44(a5)
+; RV32IZCMP-NEXT: sw s3, 40(a5)
+; RV32IZCMP-NEXT: sw s2, 36(a5)
+; RV32IZCMP-NEXT: sw t6, 32(a5)
+; RV32IZCMP-NEXT: sw t5, 28(a5)
+; RV32IZCMP-NEXT: sw t4, 24(a5)
+; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 20(a5)
+; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 16(a5)
+; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: addi sp, sp, 48
+; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 112
+; RV32IZCMP-NEXT: mret
+;
+; RV64IZCMP-LABEL: callee_with_irq:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: addi sp, sp, -128
+; RV64IZCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV64IZCMP-NEXT: lw a0, 16(a5)
+; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 20(a5)
+; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw t4, 24(a5)
+; RV64IZCMP-NEXT: lw t5, 28(a5)
+; RV64IZCMP-NEXT: lw t6, 32(a5)
+; RV64IZCMP-NEXT: lw s2, 36(a5)
+; RV64IZCMP-NEXT: lw s3, 40(a5)
+; RV64IZCMP-NEXT: lw s4, 44(a5)
+; RV64IZCMP-NEXT: lw s5, 48(a5)
+; RV64IZCMP-NEXT: lw s6, 52(a5)
+; RV64IZCMP-NEXT: lw s7, 56(a5)
+; RV64IZCMP-NEXT: lw s8, 60(a5)
+; RV64IZCMP-NEXT: lw s9, 64(a5)
+; RV64IZCMP-NEXT: lw s10, 68(a5)
+; RV64IZCMP-NEXT: lw s11, 72(a5)
+; RV64IZCMP-NEXT: lw ra, 76(a5)
+; RV64IZCMP-NEXT: lw s1, 80(a5)
+; RV64IZCMP-NEXT: lw t3, 84(a5)
+; RV64IZCMP-NEXT: lw t2, 88(a5)
+; RV64IZCMP-NEXT: lw t1, 92(a5)
+; RV64IZCMP-NEXT: lw t0, 96(a5)
+; RV64IZCMP-NEXT: lw s0, 100(a5)
+; RV64IZCMP-NEXT: lw a6, 104(a5)
+; RV64IZCMP-NEXT: lw a4, 108(a5)
+; RV64IZCMP-NEXT: lw a0, 124(a5)
+; RV64IZCMP-NEXT: lw a1, 120(a5)
+; RV64IZCMP-NEXT: lw a2, 116(a5)
+; RV64IZCMP-NEXT: lw a3, 112(a5)
+; RV64IZCMP-NEXT: sw a0, 124(a5)
+; RV64IZCMP-NEXT: sw a1, 120(a5)
+; RV64IZCMP-NEXT: sw a2, 116(a5)
+; RV64IZCMP-NEXT: sw a3, 112(a5)
+; RV64IZCMP-NEXT: sw a4, 108(a5)
+; RV64IZCMP-NEXT: sw a6, 104(a5)
+; RV64IZCMP-NEXT: sw s0, 100(a5)
+; RV64IZCMP-NEXT: sw t0, 96(a5)
+; RV64IZCMP-NEXT: sw t1, 92(a5)
+; RV64IZCMP-NEXT: sw t2, 88(a5)
+; RV64IZCMP-NEXT: sw t3, 84(a5)
+; RV64IZCMP-NEXT: sw s1, 80(a5)
+; RV64IZCMP-NEXT: sw ra, 76(a5)
+; RV64IZCMP-NEXT: sw s11, 72(a5)
+; RV64IZCMP-NEXT: sw s10, 68(a5)
+; RV64IZCMP-NEXT: sw s9, 64(a5)
+; RV64IZCMP-NEXT: sw s8, 60(a5)
+; RV64IZCMP-NEXT: sw s7, 56(a5)
+; RV64IZCMP-NEXT: sw s6, 52(a5)
+; RV64IZCMP-NEXT: sw s5, 48(a5)
+; RV64IZCMP-NEXT: sw s4, 44(a5)
+; RV64IZCMP-NEXT: sw s3, 40(a5)
+; RV64IZCMP-NEXT: sw s2, 36(a5)
+; RV64IZCMP-NEXT: sw t6, 32(a5)
+; RV64IZCMP-NEXT: sw t5, 28(a5)
+; RV64IZCMP-NEXT: sw t4, 24(a5)
+; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 20(a5)
+; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 16(a5)
+; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: addi sp, sp, 128
+; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT: mret
+;
+; RV32IZCMP-SR-LABEL: callee_with_irq:
+; RV32IZCMP-SR: # %bb.0:
+; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -112
+; RV32IZCMP-SR-NEXT: addi sp, sp, -48
+; RV32IZCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
+; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
+; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
+; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
+; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
+; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
+; RV32IZCMP-SR-NEXT: lw s5, 48(a5)
+; RV32IZCMP-SR-NEXT: lw s6, 52(a5)
+; RV32IZCMP-SR-NEXT: lw s7, 56(a5)
+; RV32IZCMP-SR-NEXT: lw s8, 60(a5)
+; RV32IZCMP-SR-NEXT: lw s9, 64(a5)
+; RV32IZCMP-SR-NEXT: lw s10, 68(a5)
+; RV32IZCMP-SR-NEXT: lw s11, 72(a5)
+; RV32IZCMP-SR-NEXT: lw ra, 76(a5)
+; RV32IZCMP-SR-NEXT: lw s1, 80(a5)
+; RV32IZCMP-SR-NEXT: lw t3, 84(a5)
+; RV32IZCMP-SR-NEXT: lw t2, 88(a5)
+; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
+; RV32IZCMP-SR-NEXT: lw t0, 96(a5)
+; RV32IZCMP-SR-NEXT: lw s0, 100(a5)
+; RV32IZCMP-SR-NEXT: lw a6, 104(a5)
+; RV32IZCMP-SR-NEXT: lw a4, 108(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
+; RV32IZCMP-SR-NEXT: lw a1, 120(a5)
+; RV32IZCMP-SR-NEXT: lw a2, 116(a5)
+; RV32IZCMP-SR-NEXT: lw a3, 112(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 124(a5)
+; RV32IZCMP-SR-NEXT: sw a1, 120(a5)
+; RV32IZCMP-SR-NEXT: sw a2, 116(a5)
+; RV32IZCMP-SR-NEXT: sw a3, 112(a5)
+; RV32IZCMP-SR-NEXT: sw a4, 108(a5)
+; RV32IZCMP-SR-NEXT: sw a6, 104(a5)
+; RV32IZCMP-SR-NEXT: sw s0, 100(a5)
+; RV32IZCMP-SR-NEXT: sw t0, 96(a5)
+; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
+; RV32IZCMP-SR-NEXT: sw t2, 88(a5)
+; RV32IZCMP-SR-NEXT: sw t3, 84(a5)
+; RV32IZCMP-SR-NEXT: sw s1, 80(a5)
+; RV32IZCMP-SR-NEXT: sw ra, 76(a5)
+; RV32IZCMP-SR-NEXT: sw s11, 72(a5)
+; RV32IZCMP-SR-NEXT: sw s10, 68(a5)
+; RV32IZCMP-SR-NEXT: sw s9, 64(a5)
+; RV32IZCMP-SR-NEXT: sw s8, 60(a5)
+; RV32IZCMP-SR-NEXT: sw s7, 56(a5)
+; RV32IZCMP-SR-NEXT: sw s6, 52(a5)
+; RV32IZCMP-SR-NEXT: sw s5, 48(a5)
+; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
+; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
+; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
+; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
+; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
+; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: addi sp, sp, 48
+; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 112
+; RV32IZCMP-SR-NEXT: mret
+;
+; RV64IZCMP-SR-LABEL: callee_with_irq:
+; RV64IZCMP-SR: # %bb.0:
+; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-SR-NEXT: addi sp, sp, -128
+; RV64IZCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
+; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
+; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
+; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
+; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
+; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
+; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
+; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
+; RV64IZCMP-SR-NEXT: lw s5, 48(a5)
+; RV64IZCMP-SR-NEXT: lw s6, 52(a5)
+; RV64IZCMP-SR-NEXT: lw s7, 56(a5)
+; RV64IZCMP-SR-NEXT: lw s8, 60(a5)
+; RV64IZCMP-SR-NEXT: lw s9, 64(a5)
+; RV64IZCMP-SR-NEXT: lw s10, 68(a5)
+; RV64IZCMP-SR-NEXT: lw s11, 72(a5)
+; RV64IZCMP-SR-NEXT: lw ra, 76(a5)
+; RV64IZCMP-SR-NEXT: lw s1, 80(a5)
+; RV64IZCMP-SR-NEXT: lw t3, 84(a5)
+; RV64IZCMP-SR-NEXT: lw t2, 88(a5)
+; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
+; RV64IZCMP-SR-NEXT: lw t0, 96(a5)
+; RV64IZCMP-SR-NEXT: lw s0, 100(a5)
+; RV64IZCMP-SR-NEXT: lw a6, 104(a5)
+; RV64IZCMP-SR-NEXT: lw a4, 108(a5)
+; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
+; RV64IZCMP-SR-NEXT: lw a1, 120(a5)
+; RV64IZCMP-SR-NEXT: lw a2, 116(a5)
+; RV64IZCMP-SR-NEXT: lw a3, 112(a5)
+; RV64IZCMP-SR-NEXT: sw a0, 124(a5)
+; RV64IZCMP-SR-NEXT: sw a1, 120(a5)
+; RV64IZCMP-SR-NEXT: sw a2, 116(a5)
+; RV64IZCMP-SR-NEXT: sw a3, 112(a5)
+; RV64IZCMP-SR-NEXT: sw a4, 108(a5)
+; RV64IZCMP-SR-NEXT: sw a6, 104(a5)
+; RV64IZCMP-SR-NEXT: sw s0, 100(a5)
+; RV64IZCMP-SR-NEXT: sw t0, 96(a5)
+; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
+; RV64IZCMP-SR-NEXT: sw t2, 88(a5)
+; RV64IZCMP-SR-NEXT: sw t3, 84(a5)
+; RV64IZCMP-SR-NEXT: sw s1, 80(a5)
+; RV64IZCMP-SR-NEXT: sw ra, 76(a5)
+; RV64IZCMP-SR-NEXT: sw s11, 72(a5)
+; RV64IZCMP-SR-NEXT: sw s10, 68(a5)
+; RV64IZCMP-SR-NEXT: sw s9, 64(a5)
+; RV64IZCMP-SR-NEXT: sw s8, 60(a5)
+; RV64IZCMP-SR-NEXT: sw s7, 56(a5)
+; RV64IZCMP-SR-NEXT: sw s6, 52(a5)
+; RV64IZCMP-SR-NEXT: sw s5, 48(a5)
+; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
+; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
+; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
+; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
+; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
+; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
+; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
+; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
+; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: addi sp, sp, 128
+; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-SR-NEXT: mret
+;
+; RV32I-LABEL: callee_with_irq:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -144
+; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t0, 136(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t1, 132(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t2, 128(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 124(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 120(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a0, 116(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a1, 112(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a2, 108(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a3, 104(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a4, 100(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a5, 96(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a6, 92(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a7, 88(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 84(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 80(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s5, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s6, 68(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s7, 64(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s8, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s9, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s10, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s11, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a7, %hi(var_test_irq)
+; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV32I-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV32I-NEXT: lw a0, 16(a5)
+; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, 20(a5)
+; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw t0, 24(a5)
+; RV32I-NEXT: lw t1, 28(a5)
+; RV32I-NEXT: lw t2, 32(a5)
+; RV32I-NEXT: lw t3, 36(a5)
+; RV32I-NEXT: lw t4, 40(a5)
+; RV32I-NEXT: lw t5, 44(a5)
+; RV32I-NEXT: lw t6, 48(a5)
+; RV32I-NEXT: lw s0, 52(a5)
+; RV32I-NEXT: lw s1, 56(a5)
+; RV32I-NEXT: lw s2, 60(a5)
+; RV32I-NEXT: lw s3, 64(a5)
+; RV32I-NEXT: lw s4, 68(a5)
+; RV32I-NEXT: lw s5, 72(a5)
+; RV32I-NEXT: lw s6, 76(a5)
+; RV32I-NEXT: lw s7, 80(a5)
+; RV32I-NEXT: lw s8, 84(a5)
+; RV32I-NEXT: lw s9, 88(a5)
+; RV32I-NEXT: lw s10, 92(a5)
+; RV32I-NEXT: lw s11, 96(a5)
+; RV32I-NEXT: lw ra, 100(a5)
+; RV32I-NEXT: lw a6, 104(a5)
+; RV32I-NEXT: lw a4, 108(a5)
+; RV32I-NEXT: lw a0, 124(a5)
+; RV32I-NEXT: lw a1, 120(a5)
+; RV32I-NEXT: lw a2, 116(a5)
+; RV32I-NEXT: lw a3, 112(a5)
+; RV32I-NEXT: sw a0, 124(a5)
+; RV32I-NEXT: sw a1, 120(a5)
+; RV32I-NEXT: sw a2, 116(a5)
+; RV32I-NEXT: sw a3, 112(a5)
+; RV32I-NEXT: sw a4, 108(a5)
+; RV32I-NEXT: sw a6, 104(a5)
+; RV32I-NEXT: sw ra, 100(a5)
+; RV32I-NEXT: sw s11, 96(a5)
+; RV32I-NEXT: sw s10, 92(a5)
+; RV32I-NEXT: sw s9, 88(a5)
+; RV32I-NEXT: sw s8, 84(a5)
+; RV32I-NEXT: sw s7, 80(a5)
+; RV32I-NEXT: sw s6, 76(a5)
+; RV32I-NEXT: sw s5, 72(a5)
+; RV32I-NEXT: sw s4, 68(a5)
+; RV32I-NEXT: sw s3, 64(a5)
+; RV32I-NEXT: sw s2, 60(a5)
+; RV32I-NEXT: sw s1, 56(a5)
+; RV32I-NEXT: sw s0, 52(a5)
+; RV32I-NEXT: sw t6, 48(a5)
+; RV32I-NEXT: sw t5, 44(a5)
+; RV32I-NEXT: sw t4, 40(a5)
+; RV32I-NEXT: sw t3, 36(a5)
+; RV32I-NEXT: sw t2, 32(a5)
+; RV32I-NEXT: sw t1, 28(a5)
+; RV32I-NEXT: sw t0, 24(a5)
+; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, 20(a5)
+; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, 16(a5)
+; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV32I-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t0, 136(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t1, 132(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t2, 128(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 124(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 120(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a0, 116(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a1, 112(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a2, 108(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a3, 104(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a4, 100(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a5, 96(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a6, 92(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a7, 88(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 84(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 80(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s5, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s6, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s7, 64(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s8, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s9, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s11, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: mret
+;
+; RV64I-LABEL: callee_with_irq:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -272
+; RV64I-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t0, 256(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t1, 248(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t2, 240(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 232(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 224(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a0, 216(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a1, 208(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a2, 200(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a3, 192(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a4, 184(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a5, 176(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a6, 168(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a7, 160(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 152(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 144(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s5, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s6, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s7, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s8, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s9, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s10, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s11, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t3, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a7, %hi(var_test_irq)
+; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV64I-NEXT: lw a0, 16(a5)
+; RV64I-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, 20(a5)
+; RV64I-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw t0, 24(a5)
+; RV64I-NEXT: lw t1, 28(a5)
+; RV64I-NEXT: lw t2, 32(a5)
+; RV64I-NEXT: lw t3, 36(a5)
+; RV64I-NEXT: lw t4, 40(a5)
+; RV64I-NEXT: lw t5, 44(a5)
+; RV64I-NEXT: lw t6, 48(a5)
+; RV64I-NEXT: lw s0, 52(a5)
+; RV64I-NEXT: lw s1, 56(a5)
+; RV64I-NEXT: lw s2, 60(a5)
+; RV64I-NEXT: lw s3, 64(a5)
+; RV64I-NEXT: lw s4, 68(a5)
+; RV64I-NEXT: lw s5, 72(a5)
+; RV64I-NEXT: lw s6, 76(a5)
+; RV64I-NEXT: lw s7, 80(a5)
+; RV64I-NEXT: lw s8, 84(a5)
+; RV64I-NEXT: lw s9, 88(a5)
+; RV64I-NEXT: lw s10, 92(a5)
+; RV64I-NEXT: lw s11, 96(a5)
+; RV64I-NEXT: lw ra, 100(a5)
+; RV64I-NEXT: lw a6, 104(a5)
+; RV64I-NEXT: lw a4, 108(a5)
+; RV64I-NEXT: lw a0, 124(a5)
+; RV64I-NEXT: lw a1, 120(a5)
+; RV64I-NEXT: lw a2, 116(a5)
+; RV64I-NEXT: lw a3, 112(a5)
+; RV64I-NEXT: sw a0, 124(a5)
+; RV64I-NEXT: sw a1, 120(a5)
+; RV64I-NEXT: sw a2, 116(a5)
+; RV64I-NEXT: sw a3, 112(a5)
+; RV64I-NEXT: sw a4, 108(a5)
+; RV64I-NEXT: sw a6, 104(a5)
+; RV64I-NEXT: sw ra, 100(a5)
+; RV64I-NEXT: sw s11, 96(a5)
+; RV64I-NEXT: sw s10, 92(a5)
+; RV64I-NEXT: sw s9, 88(a5)
+; RV64I-NEXT: sw s8, 84(a5)
+; RV64I-NEXT: sw s7, 80(a5)
+; RV64I-NEXT: sw s6, 76(a5)
+; RV64I-NEXT: sw s5, 72(a5)
+; RV64I-NEXT: sw s4, 68(a5)
+; RV64I-NEXT: sw s3, 64(a5)
+; RV64I-NEXT: sw s2, 60(a5)
+; RV64I-NEXT: sw s1, 56(a5)
+; RV64I-NEXT: sw s0, 52(a5)
+; RV64I-NEXT: sw t6, 48(a5)
+; RV64I-NEXT: sw t5, 44(a5)
+; RV64I-NEXT: sw t4, 40(a5)
+; RV64I-NEXT: sw t3, 36(a5)
+; RV64I-NEXT: sw t2, 32(a5)
+; RV64I-NEXT: sw t1, 28(a5)
+; RV64I-NEXT: sw t0, 24(a5)
+; RV64I-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, 20(a5)
+; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, 16(a5)
+; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV64I-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t0, 256(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t1, 248(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t2, 240(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 232(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 224(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a0, 216(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a1, 208(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a2, 200(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a3, 192(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a4, 184(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a5, 176(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a6, 168(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a7, 160(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 152(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 144(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s5, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s6, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s7, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s8, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s9, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s10, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s11, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t3, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 272
+; RV64I-NEXT: mret
+ %val = load [32 x i32], [32 x i32]* @var_test_irq
+ store volatile [32 x i32] %val, [32 x i32]* @var_test_irq
+ ret void
+}
+
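+; Without the interrupt attribute only ra and the s-registers need saving, so
+; the Zcmp configurations cover all callee saves (plus spill scratch) with a
+; single cm.push/cm.pop pair, and the -SR configurations with __riscv_save_12.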
+define void @callee_no_irq() nounwind {
+; RV32IZCMP-LABEL: callee_no_irq:
+; RV32IZCMP: # %bb.0:
+; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV32IZCMP-NEXT: lw a0, 16(a5)
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw a0, 20(a5)
+; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: lw t4, 24(a5)
+; RV32IZCMP-NEXT: lw t5, 28(a5)
+; RV32IZCMP-NEXT: lw t6, 32(a5)
+; RV32IZCMP-NEXT: lw s2, 36(a5)
+; RV32IZCMP-NEXT: lw s3, 40(a5)
+; RV32IZCMP-NEXT: lw s4, 44(a5)
+; RV32IZCMP-NEXT: lw s5, 48(a5)
+; RV32IZCMP-NEXT: lw s6, 52(a5)
+; RV32IZCMP-NEXT: lw s7, 56(a5)
+; RV32IZCMP-NEXT: lw s8, 60(a5)
+; RV32IZCMP-NEXT: lw s9, 64(a5)
+; RV32IZCMP-NEXT: lw s10, 68(a5)
+; RV32IZCMP-NEXT: lw s11, 72(a5)
+; RV32IZCMP-NEXT: lw ra, 76(a5)
+; RV32IZCMP-NEXT: lw s1, 80(a5)
+; RV32IZCMP-NEXT: lw t3, 84(a5)
+; RV32IZCMP-NEXT: lw t2, 88(a5)
+; RV32IZCMP-NEXT: lw t1, 92(a5)
+; RV32IZCMP-NEXT: lw t0, 96(a5)
+; RV32IZCMP-NEXT: lw s0, 100(a5)
+; RV32IZCMP-NEXT: lw a6, 104(a5)
+; RV32IZCMP-NEXT: lw a4, 108(a5)
+; RV32IZCMP-NEXT: lw a0, 124(a5)
+; RV32IZCMP-NEXT: lw a1, 120(a5)
+; RV32IZCMP-NEXT: lw a2, 116(a5)
+; RV32IZCMP-NEXT: lw a3, 112(a5)
+; RV32IZCMP-NEXT: sw a0, 124(a5)
+; RV32IZCMP-NEXT: sw a1, 120(a5)
+; RV32IZCMP-NEXT: sw a2, 116(a5)
+; RV32IZCMP-NEXT: sw a3, 112(a5)
+; RV32IZCMP-NEXT: sw a4, 108(a5)
+; RV32IZCMP-NEXT: sw a6, 104(a5)
+; RV32IZCMP-NEXT: sw s0, 100(a5)
+; RV32IZCMP-NEXT: sw t0, 96(a5)
+; RV32IZCMP-NEXT: sw t1, 92(a5)
+; RV32IZCMP-NEXT: sw t2, 88(a5)
+; RV32IZCMP-NEXT: sw t3, 84(a5)
+; RV32IZCMP-NEXT: sw s1, 80(a5)
+; RV32IZCMP-NEXT: sw ra, 76(a5)
+; RV32IZCMP-NEXT: sw s11, 72(a5)
+; RV32IZCMP-NEXT: sw s10, 68(a5)
+; RV32IZCMP-NEXT: sw s9, 64(a5)
+; RV32IZCMP-NEXT: sw s8, 60(a5)
+; RV32IZCMP-NEXT: sw s7, 56(a5)
+; RV32IZCMP-NEXT: sw s6, 52(a5)
+; RV32IZCMP-NEXT: sw s5, 48(a5)
+; RV32IZCMP-NEXT: sw s4, 44(a5)
+; RV32IZCMP-NEXT: sw s3, 40(a5)
+; RV32IZCMP-NEXT: sw s2, 36(a5)
+; RV32IZCMP-NEXT: sw t6, 32(a5)
+; RV32IZCMP-NEXT: sw t5, 28(a5)
+; RV32IZCMP-NEXT: sw t4, 24(a5)
+; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 20(a5)
+; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, 16(a5)
+; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 96
+; RV32IZCMP-NEXT: ret
+;
+; RV64IZCMP-LABEL: callee_no_irq:
+; RV64IZCMP: # %bb.0:
+; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV64IZCMP-NEXT: lw a0, 16(a5)
+; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 20(a5)
+; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw t4, 24(a5)
+; RV64IZCMP-NEXT: lw t5, 28(a5)
+; RV64IZCMP-NEXT: lw t6, 32(a5)
+; RV64IZCMP-NEXT: lw s2, 36(a5)
+; RV64IZCMP-NEXT: lw s3, 40(a5)
+; RV64IZCMP-NEXT: lw s4, 44(a5)
+; RV64IZCMP-NEXT: lw s5, 48(a5)
+; RV64IZCMP-NEXT: lw s6, 52(a5)
+; RV64IZCMP-NEXT: lw s7, 56(a5)
+; RV64IZCMP-NEXT: lw s8, 60(a5)
+; RV64IZCMP-NEXT: lw s9, 64(a5)
+; RV64IZCMP-NEXT: lw s10, 68(a5)
+; RV64IZCMP-NEXT: lw s11, 72(a5)
+; RV64IZCMP-NEXT: lw ra, 76(a5)
+; RV64IZCMP-NEXT: lw s1, 80(a5)
+; RV64IZCMP-NEXT: lw t3, 84(a5)
+; RV64IZCMP-NEXT: lw t2, 88(a5)
+; RV64IZCMP-NEXT: lw t1, 92(a5)
+; RV64IZCMP-NEXT: lw t0, 96(a5)
+; RV64IZCMP-NEXT: lw s0, 100(a5)
+; RV64IZCMP-NEXT: lw a6, 104(a5)
+; RV64IZCMP-NEXT: lw a4, 108(a5)
+; RV64IZCMP-NEXT: lw a0, 124(a5)
+; RV64IZCMP-NEXT: lw a1, 120(a5)
+; RV64IZCMP-NEXT: lw a2, 116(a5)
+; RV64IZCMP-NEXT: lw a3, 112(a5)
+; RV64IZCMP-NEXT: sw a0, 124(a5)
+; RV64IZCMP-NEXT: sw a1, 120(a5)
+; RV64IZCMP-NEXT: sw a2, 116(a5)
+; RV64IZCMP-NEXT: sw a3, 112(a5)
+; RV64IZCMP-NEXT: sw a4, 108(a5)
+; RV64IZCMP-NEXT: sw a6, 104(a5)
+; RV64IZCMP-NEXT: sw s0, 100(a5)
+; RV64IZCMP-NEXT: sw t0, 96(a5)
+; RV64IZCMP-NEXT: sw t1, 92(a5)
+; RV64IZCMP-NEXT: sw t2, 88(a5)
+; RV64IZCMP-NEXT: sw t3, 84(a5)
+; RV64IZCMP-NEXT: sw s1, 80(a5)
+; RV64IZCMP-NEXT: sw ra, 76(a5)
+; RV64IZCMP-NEXT: sw s11, 72(a5)
+; RV64IZCMP-NEXT: sw s10, 68(a5)
+; RV64IZCMP-NEXT: sw s9, 64(a5)
+; RV64IZCMP-NEXT: sw s8, 60(a5)
+; RV64IZCMP-NEXT: sw s7, 56(a5)
+; RV64IZCMP-NEXT: sw s6, 52(a5)
+; RV64IZCMP-NEXT: sw s5, 48(a5)
+; RV64IZCMP-NEXT: sw s4, 44(a5)
+; RV64IZCMP-NEXT: sw s3, 40(a5)
+; RV64IZCMP-NEXT: sw s2, 36(a5)
+; RV64IZCMP-NEXT: sw t6, 32(a5)
+; RV64IZCMP-NEXT: sw t5, 28(a5)
+; RV64IZCMP-NEXT: sw t4, 24(a5)
+; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 20(a5)
+; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, 16(a5)
+; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT: ret
+;
+; RV32IZCMP-SR-LABEL: callee_no_irq:
+; RV32IZCMP-SR: # %bb.0:
+; RV32IZCMP-SR-NEXT: call t0, __riscv_save_12
+; RV32IZCMP-SR-NEXT: addi sp, sp, -32
+; RV32IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
+; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
+; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
+; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
+; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
+; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
+; RV32IZCMP-SR-NEXT: lw s5, 48(a5)
+; RV32IZCMP-SR-NEXT: lw s6, 52(a5)
+; RV32IZCMP-SR-NEXT: lw s7, 56(a5)
+; RV32IZCMP-SR-NEXT: lw s8, 60(a5)
+; RV32IZCMP-SR-NEXT: lw s9, 64(a5)
+; RV32IZCMP-SR-NEXT: lw s10, 68(a5)
+; RV32IZCMP-SR-NEXT: lw s11, 72(a5)
+; RV32IZCMP-SR-NEXT: lw ra, 76(a5)
+; RV32IZCMP-SR-NEXT: lw s1, 80(a5)
+; RV32IZCMP-SR-NEXT: lw t3, 84(a5)
+; RV32IZCMP-SR-NEXT: lw t2, 88(a5)
+; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
+; RV32IZCMP-SR-NEXT: lw t0, 96(a5)
+; RV32IZCMP-SR-NEXT: lw s0, 100(a5)
+; RV32IZCMP-SR-NEXT: lw a6, 104(a5)
+; RV32IZCMP-SR-NEXT: lw a4, 108(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
+; RV32IZCMP-SR-NEXT: lw a1, 120(a5)
+; RV32IZCMP-SR-NEXT: lw a2, 116(a5)
+; RV32IZCMP-SR-NEXT: lw a3, 112(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 124(a5)
+; RV32IZCMP-SR-NEXT: sw a1, 120(a5)
+; RV32IZCMP-SR-NEXT: sw a2, 116(a5)
+; RV32IZCMP-SR-NEXT: sw a3, 112(a5)
+; RV32IZCMP-SR-NEXT: sw a4, 108(a5)
+; RV32IZCMP-SR-NEXT: sw a6, 104(a5)
+; RV32IZCMP-SR-NEXT: sw s0, 100(a5)
+; RV32IZCMP-SR-NEXT: sw t0, 96(a5)
+; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
+; RV32IZCMP-SR-NEXT: sw t2, 88(a5)
+; RV32IZCMP-SR-NEXT: sw t3, 84(a5)
+; RV32IZCMP-SR-NEXT: sw s1, 80(a5)
+; RV32IZCMP-SR-NEXT: sw ra, 76(a5)
+; RV32IZCMP-SR-NEXT: sw s11, 72(a5)
+; RV32IZCMP-SR-NEXT: sw s10, 68(a5)
+; RV32IZCMP-SR-NEXT: sw s9, 64(a5)
+; RV32IZCMP-SR-NEXT: sw s8, 60(a5)
+; RV32IZCMP-SR-NEXT: sw s7, 56(a5)
+; RV32IZCMP-SR-NEXT: sw s6, 52(a5)
+; RV32IZCMP-SR-NEXT: sw s5, 48(a5)
+; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
+; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
+; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
+; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
+; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
+; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
+; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV32IZCMP-SR-NEXT: addi sp, sp, 32
+; RV32IZCMP-SR-NEXT: tail __riscv_restore_12
+;
+; RV64IZCMP-SR-LABEL: callee_no_irq:
+; RV64IZCMP-SR: # %bb.0:
+; RV64IZCMP-SR-NEXT: call t0, __riscv_save_12
+; RV64IZCMP-SR-NEXT: addi sp, sp, -48
+; RV64IZCMP-SR-NEXT: lui a7, %hi(var_test_irq)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
+; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
+; RV64IZCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
+; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
+; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
+; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
+; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
+; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
+; RV64IZCMP-SR-NEXT: lw s5, 48(a5)
+; RV64IZCMP-SR-NEXT: lw s6, 52(a5)
+; RV64IZCMP-SR-NEXT: lw s7, 56(a5)
+; RV64IZCMP-SR-NEXT: lw s8, 60(a5)
+; RV64IZCMP-SR-NEXT: lw s9, 64(a5)
+; RV64IZCMP-SR-NEXT: lw s10, 68(a5)
+; RV64IZCMP-SR-NEXT: lw s11, 72(a5)
+; RV64IZCMP-SR-NEXT: lw ra, 76(a5)
+; RV64IZCMP-SR-NEXT: lw s1, 80(a5)
+; RV64IZCMP-SR-NEXT: lw t3, 84(a5)
+; RV64IZCMP-SR-NEXT: lw t2, 88(a5)
+; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
+; RV64IZCMP-SR-NEXT: lw t0, 96(a5)
+; RV64IZCMP-SR-NEXT: lw s0, 100(a5)
+; RV64IZCMP-SR-NEXT: lw a6, 104(a5)
+; RV64IZCMP-SR-NEXT: lw a4, 108(a5)
+; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
+; RV64IZCMP-SR-NEXT: lw a1, 120(a5)
+; RV64IZCMP-SR-NEXT: lw a2, 116(a5)
+; RV64IZCMP-SR-NEXT: lw a3, 112(a5)
+; RV64IZCMP-SR-NEXT: sw a0, 124(a5)
+; RV64IZCMP-SR-NEXT: sw a1, 120(a5)
+; RV64IZCMP-SR-NEXT: sw a2, 116(a5)
+; RV64IZCMP-SR-NEXT: sw a3, 112(a5)
+; RV64IZCMP-SR-NEXT: sw a4, 108(a5)
+; RV64IZCMP-SR-NEXT: sw a6, 104(a5)
+; RV64IZCMP-SR-NEXT: sw s0, 100(a5)
+; RV64IZCMP-SR-NEXT: sw t0, 96(a5)
+; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
+; RV64IZCMP-SR-NEXT: sw t2, 88(a5)
+; RV64IZCMP-SR-NEXT: sw t3, 84(a5)
+; RV64IZCMP-SR-NEXT: sw s1, 80(a5)
+; RV64IZCMP-SR-NEXT: sw ra, 76(a5)
+; RV64IZCMP-SR-NEXT: sw s11, 72(a5)
+; RV64IZCMP-SR-NEXT: sw s10, 68(a5)
+; RV64IZCMP-SR-NEXT: sw s9, 64(a5)
+; RV64IZCMP-SR-NEXT: sw s8, 60(a5)
+; RV64IZCMP-SR-NEXT: sw s7, 56(a5)
+; RV64IZCMP-SR-NEXT: sw s6, 52(a5)
+; RV64IZCMP-SR-NEXT: sw s5, 48(a5)
+; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
+; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
+; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
+; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
+; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
+; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
+; RV64IZCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
+; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
+; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV64IZCMP-SR-NEXT: addi sp, sp, 48
+; RV64IZCMP-SR-NEXT: tail __riscv_restore_12
+;
+; RV32I-LABEL: callee_no_irq:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -80
+; RV32I-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a7, %hi(var_test_irq)
+; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV32I-NEXT: lw a0, 16(a5)
+; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, 20(a5)
+; RV32I-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw t0, 24(a5)
+; RV32I-NEXT: lw t1, 28(a5)
+; RV32I-NEXT: lw t2, 32(a5)
+; RV32I-NEXT: lw t3, 36(a5)
+; RV32I-NEXT: lw t4, 40(a5)
+; RV32I-NEXT: lw t5, 44(a5)
+; RV32I-NEXT: lw t6, 48(a5)
+; RV32I-NEXT: lw s0, 52(a5)
+; RV32I-NEXT: lw s1, 56(a5)
+; RV32I-NEXT: lw s2, 60(a5)
+; RV32I-NEXT: lw s3, 64(a5)
+; RV32I-NEXT: lw s4, 68(a5)
+; RV32I-NEXT: lw s5, 72(a5)
+; RV32I-NEXT: lw s6, 76(a5)
+; RV32I-NEXT: lw s7, 80(a5)
+; RV32I-NEXT: lw s8, 84(a5)
+; RV32I-NEXT: lw s9, 88(a5)
+; RV32I-NEXT: lw s10, 92(a5)
+; RV32I-NEXT: lw s11, 96(a5)
+; RV32I-NEXT: lw ra, 100(a5)
+; RV32I-NEXT: lw a6, 104(a5)
+; RV32I-NEXT: lw a4, 108(a5)
+; RV32I-NEXT: lw a0, 124(a5)
+; RV32I-NEXT: lw a1, 120(a5)
+; RV32I-NEXT: lw a2, 116(a5)
+; RV32I-NEXT: lw a3, 112(a5)
+; RV32I-NEXT: sw a0, 124(a5)
+; RV32I-NEXT: sw a1, 120(a5)
+; RV32I-NEXT: sw a2, 116(a5)
+; RV32I-NEXT: sw a3, 112(a5)
+; RV32I-NEXT: sw a4, 108(a5)
+; RV32I-NEXT: sw a6, 104(a5)
+; RV32I-NEXT: sw ra, 100(a5)
+; RV32I-NEXT: sw s11, 96(a5)
+; RV32I-NEXT: sw s10, 92(a5)
+; RV32I-NEXT: sw s9, 88(a5)
+; RV32I-NEXT: sw s8, 84(a5)
+; RV32I-NEXT: sw s7, 80(a5)
+; RV32I-NEXT: sw s6, 76(a5)
+; RV32I-NEXT: sw s5, 72(a5)
+; RV32I-NEXT: sw s4, 68(a5)
+; RV32I-NEXT: sw s3, 64(a5)
+; RV32I-NEXT: sw s2, 60(a5)
+; RV32I-NEXT: sw s1, 56(a5)
+; RV32I-NEXT: sw s0, 52(a5)
+; RV32I-NEXT: sw t6, 48(a5)
+; RV32I-NEXT: sw t5, 44(a5)
+; RV32I-NEXT: sw t4, 40(a5)
+; RV32I-NEXT: sw t3, 36(a5)
+; RV32I-NEXT: sw t2, 32(a5)
+; RV32I-NEXT: sw t1, 28(a5)
+; RV32I-NEXT: sw t0, 24(a5)
+; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, 20(a5)
+; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, 16(a5)
+; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV32I-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 80
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: callee_no_irq:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -160
+; RV64I-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s8, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a7, %hi(var_test_irq)
+; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
+; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
+; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
+; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
+; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi a5, a7, %lo(var_test_irq)
+; RV64I-NEXT: lw a0, 16(a5)
+; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw a0, 20(a5)
+; RV64I-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lw t0, 24(a5)
+; RV64I-NEXT: lw t1, 28(a5)
+; RV64I-NEXT: lw t2, 32(a5)
+; RV64I-NEXT: lw t3, 36(a5)
+; RV64I-NEXT: lw t4, 40(a5)
+; RV64I-NEXT: lw t5, 44(a5)
+; RV64I-NEXT: lw t6, 48(a5)
+; RV64I-NEXT: lw s0, 52(a5)
+; RV64I-NEXT: lw s1, 56(a5)
+; RV64I-NEXT: lw s2, 60(a5)
+; RV64I-NEXT: lw s3, 64(a5)
+; RV64I-NEXT: lw s4, 68(a5)
+; RV64I-NEXT: lw s5, 72(a5)
+; RV64I-NEXT: lw s6, 76(a5)
+; RV64I-NEXT: lw s7, 80(a5)
+; RV64I-NEXT: lw s8, 84(a5)
+; RV64I-NEXT: lw s9, 88(a5)
+; RV64I-NEXT: lw s10, 92(a5)
+; RV64I-NEXT: lw s11, 96(a5)
+; RV64I-NEXT: lw ra, 100(a5)
+; RV64I-NEXT: lw a6, 104(a5)
+; RV64I-NEXT: lw a4, 108(a5)
+; RV64I-NEXT: lw a0, 124(a5)
+; RV64I-NEXT: lw a1, 120(a5)
+; RV64I-NEXT: lw a2, 116(a5)
+; RV64I-NEXT: lw a3, 112(a5)
+; RV64I-NEXT: sw a0, 124(a5)
+; RV64I-NEXT: sw a1, 120(a5)
+; RV64I-NEXT: sw a2, 116(a5)
+; RV64I-NEXT: sw a3, 112(a5)
+; RV64I-NEXT: sw a4, 108(a5)
+; RV64I-NEXT: sw a6, 104(a5)
+; RV64I-NEXT: sw ra, 100(a5)
+; RV64I-NEXT: sw s11, 96(a5)
+; RV64I-NEXT: sw s10, 92(a5)
+; RV64I-NEXT: sw s9, 88(a5)
+; RV64I-NEXT: sw s8, 84(a5)
+; RV64I-NEXT: sw s7, 80(a5)
+; RV64I-NEXT: sw s6, 76(a5)
+; RV64I-NEXT: sw s5, 72(a5)
+; RV64I-NEXT: sw s4, 68(a5)
+; RV64I-NEXT: sw s3, 64(a5)
+; RV64I-NEXT: sw s2, 60(a5)
+; RV64I-NEXT: sw s1, 56(a5)
+; RV64I-NEXT: sw s0, 52(a5)
+; RV64I-NEXT: sw t6, 48(a5)
+; RV64I-NEXT: sw t5, 44(a5)
+; RV64I-NEXT: sw t4, 40(a5)
+; RV64I-NEXT: sw t3, 36(a5)
+; RV64I-NEXT: sw t2, 32(a5)
+; RV64I-NEXT: sw t1, 28(a5)
+; RV64I-NEXT: sw t0, 24(a5)
+; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, 20(a5)
+; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, 16(a5)
+; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
+; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
+; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
+; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT: sw a0, %lo(var_test_irq)(a7)
+; RV64I-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s8, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 160
+; RV64I-NEXT: ret
+ %val = load [32 x i32], [32 x i32]* @var_test_irq
+ store volatile [32 x i32] %val, [32 x i32]* @var_test_irq
+ ret void
+}
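For context on what the new check lines above pin down: with Zcmp enabled, the
thirteen individual callee-saved spills and reloads emitted by the plain
RV32I/RV64I lowering collapse into a single cm.push/cm.pop pair, and the
immediate on that pair carries the readjusted frame size this patch computes.
A minimal side-by-side sketch, condensed from the RV32 output above (the
elided middle is the same load/store body in both versions; this is an
illustration, not part of the committed test):

  # RV32I: explicit sp adjustment plus one sw/lw per callee-saved register
  addi sp, sp, -80
  sw   ra, 76(sp)                 # spill ra, then s0-s11 one at a time
  ...
  lw   ra, 76(sp)                 # matching per-register reloads
  addi sp, sp, 80
  ret

  # RV32IZCMP: one instruction each way. The 52 bytes of register saves
  # plus the function's local spill area, rounded to the 16-byte
  # granularity Zcmp stack adjustments use, give the 96 seen here.
  cm.push {ra, s0-s11}, -96       # spill ra/s0-s11 and allocate the frame
  ...
  cm.pop  {ra, s0-s11}, 96        # restore and deallocate in one go
  ret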