[llvm] [RISCV] Loosen the requirement of shadow stack codegen to Zimop (PR #152251)

Jesse Huang via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 20 06:58:48 PDT 2025


https://github.com/jaidTw updated https://github.com/llvm/llvm-project/pull/152251

>From b45f51fc78519551e01927dcd5584baac8b81f25 Mon Sep 17 00:00:00 2001
From: Jesse Huang <jesse.huang at sifive.com>
Date: Tue, 5 Aug 2025 22:05:35 -0700
Subject: [PATCH 1/3] [RISCV] Loosen the requirement of shadow stack codegen to
 Zimop

Loosen the hard requirement of shadow stack codegen to Zimop, and
emit mop/c.mop/zicfiss instructions according to the available extensions.
---
 llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 9fc0d815ceee3..97d43e42284ee 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -107,7 +107,7 @@ static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
                             const DebugLoc &DL) {
   const auto &STI = MF.getSubtarget<RISCVSubtarget>();
   bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
-                          STI.hasStdExtZicfiss();
+                          STI.hasStdExtZimop();
   bool HasSWShadowStack =
       MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
   if (!HasHWShadowStack && !HasSWShadowStack)
@@ -123,8 +123,14 @@ static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
     return;
 
   const RISCVInstrInfo *TII = STI.getInstrInfo();
+  // Prefer HW shadow stack over SW shadow stack.
   if (HasHWShadowStack) {
-    BuildMI(MBB, MI, DL, TII->get(RISCV::SSPUSH)).addReg(RAReg);
+    if (STI.hasStdExtZicfiss())
+      BuildMI(MBB, MI, DL, TII->get(RISCV::SSPUSH)).addReg(RAReg);
+    else if (STI.hasStdExtZcmop())
+      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_C_SSPUSH)).addReg(RAReg);
+    else
+      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPUSH)).addReg(RAReg);
     return;
   }
 
@@ -172,7 +178,7 @@ static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
                             const DebugLoc &DL) {
   const auto &STI = MF.getSubtarget<RISCVSubtarget>();
   bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
-                          STI.hasStdExtZicfiss();
+                          STI.hasStdExtZimop();
   bool HasSWShadowStack =
       MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
   if (!HasHWShadowStack && !HasSWShadowStack)
@@ -185,8 +191,12 @@ static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
     return;
 
   const RISCVInstrInfo *TII = STI.getInstrInfo();
+  // Prefer HW shadow stack over SW shadow stack.
   if (HasHWShadowStack) {
-    BuildMI(MBB, MI, DL, TII->get(RISCV::SSPOPCHK)).addReg(RAReg);
+    if (STI.hasStdExtZicfiss())
+      BuildMI(MBB, MI, DL, TII->get(RISCV::SSPOPCHK)).addReg(RAReg);
+    else
+      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPOPCHK)).addReg(RAReg);
     return;
   }
 

>From 16cbf045e4ea941a3384ed0446637c6b31e053f5 Mon Sep 17 00:00:00 2001
From: Jesse Huang <jesse.huang at sifive.com>
Date: Wed, 20 Aug 2025 06:56:49 -0700
Subject: [PATCH 2/3] [RISCV] Add PseudoMOP instructions for sspush/sspopchk

---
 .../lib/Target/RISCV/RISCVInstrInfoZicfiss.td | 20 +++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZicfiss.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZicfiss.td
index 49a57f86cccd6..3c3e63c6d3e2f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZicfiss.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZicfiss.td
@@ -62,6 +62,26 @@ defm SSAMOSWAP_W  : AMO_rr_aq_rl<0b01001, 0b010, "ssamoswap.w">;
 let Predicates = [HasStdExtZicfiss, IsRV64] in
 defm SSAMOSWAP_D  : AMO_rr_aq_rl<0b01001, 0b011, "ssamoswap.d">;
 
+let Predicates = [HasStdExtZimop] in {
+let Uses = [SSP], Defs = [SSP], hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in
+def PseudoMOP_SSPUSH : RVInstR<0b1100111, 0b100, OPC_SYSTEM, (outs), (ins GPRX1X5:$rs2),
+                     "mop.rr.7", "$rs2"> {
+  let rd = 0b00000;
+  let rs1 = 0b00000;
+}
+let Uses = [SSP], Defs = [SSP], hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in
+def PseudoMOP_SSPOPCHK : RVInstI<0b100, OPC_SYSTEM, (outs), (ins GPRX1X5:$rs1), "mop.r.28",
+                       "$rs1"> {
+  let rd = 0;
+  let imm12 = 0b110011011100;
+}
+}
+
+let Predicates = [HasStdExtZcmop] in {
+let Uses = [SSP], Defs = [SSP], hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in
+def PseudoMOP_C_SSPUSH : RVC_SSInst<0b00001, GPRX1, "c.mop.1">;
+}
+
 //===----------------------------------------------------------------------===/
 // Compress Instruction tablegen backend.
 //===----------------------------------------------------------------------===//

>From 038d11b558e5639ce193c9f4bd7676d4dfce9098 Mon Sep 17 00:00:00 2001
From: Jesse Huang <jesse.huang at sifive.com>
Date: Wed, 20 Aug 2025 06:57:38 -0700
Subject: [PATCH 3/3] Update test

---
 llvm/test/CodeGen/RISCV/shadowcallstack.ll | 888 +++++++++++++++++++++
 1 file changed, 888 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index 03acd9491fed8..afe170aeca4ec 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -3,6 +3,14 @@
 ; RUN:   | FileCheck %s --check-prefix=RV32
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefix=RV64
+; RUN: llc -mtriple=riscv32 -mattr=+zimop < %s \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefix=RV32-ZIMOP
+; RUN: llc -mtriple=riscv64 -mattr=+zimop < %s \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefix=RV64-ZIMOP
+; RUN: llc -mtriple=riscv32 -mattr=+zimop,+zcmop < %s \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefix=RV32-ZCMOP
+; RUN: llc -mtriple=riscv64 -mattr=+zimop,+zcmop < %s \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefix=RV64-ZCMOP
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicfiss < %s \
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefix=RV32-ZICFISS
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfiss < %s \
@@ -17,6 +25,22 @@ define void @f1() shadowcallstack {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f1:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f1:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f1:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f1:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f1:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    ret
@@ -38,6 +62,22 @@ define void @f2() shadowcallstack {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    tail foo
 ;
+; RV32-ZIMOP-LABEL: f2:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    tail foo
+;
+; RV64-ZIMOP-LABEL: f2:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    tail foo
+;
+; RV32-ZCMOP-LABEL: f2:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    tail foo
+;
+; RV64-ZCMOP-LABEL: f2:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    tail foo
+;
 ; RV32-ZICFISS-LABEL: f2:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    tail foo
@@ -90,6 +130,82 @@ define i32 @f3() shadowcallstack {
 ; RV64-NEXT:    .cfi_restore gp
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f3:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    addi gp, gp, 4
+; RV32-ZIMOP-NEXT:    sw ra, -4(gp)
+; RV32-ZIMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    .cfi_restore ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZIMOP-NEXT:    lw ra, -4(gp)
+; RV32-ZIMOP-NEXT:    addi gp, gp, -4
+; RV32-ZIMOP-NEXT:    .cfi_restore gp
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f3:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    addi gp, gp, 8
+; RV64-ZIMOP-NEXT:    sd ra, -8(gp)
+; RV64-ZIMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
+; RV64-ZIMOP-NEXT:    addi sp, sp, -16
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ZIMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    .cfi_restore ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, 16
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZIMOP-NEXT:    ld ra, -8(gp)
+; RV64-ZIMOP-NEXT:    addi gp, gp, -8
+; RV64-ZIMOP-NEXT:    .cfi_restore gp
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f3:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    addi gp, gp, 4
+; RV32-ZCMOP-NEXT:    sw ra, -4(gp)
+; RV32-ZCMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    .cfi_restore ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZCMOP-NEXT:    lw ra, -4(gp)
+; RV32-ZCMOP-NEXT:    addi gp, gp, -4
+; RV32-ZCMOP-NEXT:    .cfi_restore gp
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f3:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    addi gp, gp, 8
+; RV64-ZCMOP-NEXT:    sd ra, -8(gp)
+; RV64-ZCMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
+; RV64-ZCMOP-NEXT:    addi sp, sp, -16
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ZCMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    .cfi_restore ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, 16
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZCMOP-NEXT:    ld ra, -8(gp)
+; RV64-ZCMOP-NEXT:    addi gp, gp, -8
+; RV64-ZCMOP-NEXT:    .cfi_restore gp
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f3:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    addi gp, gp, 4
@@ -213,6 +329,166 @@ define i32 @f4() shadowcallstack {
 ; RV64-NEXT:    .cfi_restore gp
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f4:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    addi gp, gp, 4
+; RV32-ZIMOP-NEXT:    sw ra, -4(gp)
+; RV32-ZIMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZIMOP-NEXT:    .cfi_offset s0, -8
+; RV32-ZIMOP-NEXT:    .cfi_offset s1, -12
+; RV32-ZIMOP-NEXT:    .cfi_offset s2, -16
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s0, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s1, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s2, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    add s0, s0, s1
+; RV32-ZIMOP-NEXT:    add a0, s2, a0
+; RV32-ZIMOP-NEXT:    add a0, s0, a0
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    .cfi_restore ra
+; RV32-ZIMOP-NEXT:    .cfi_restore s0
+; RV32-ZIMOP-NEXT:    .cfi_restore s1
+; RV32-ZIMOP-NEXT:    .cfi_restore s2
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZIMOP-NEXT:    lw ra, -4(gp)
+; RV32-ZIMOP-NEXT:    addi gp, gp, -4
+; RV32-ZIMOP-NEXT:    .cfi_restore gp
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f4:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    addi gp, gp, 8
+; RV64-ZIMOP-NEXT:    sd ra, -8(gp)
+; RV64-ZIMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
+; RV64-ZIMOP-NEXT:    addi sp, sp, -32
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 32
+; RV64-ZIMOP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZIMOP-NEXT:    .cfi_offset s0, -16
+; RV64-ZIMOP-NEXT:    .cfi_offset s1, -24
+; RV64-ZIMOP-NEXT:    .cfi_offset s2, -32
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s0, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s1, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s2, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    add s0, s0, s1
+; RV64-ZIMOP-NEXT:    add a0, s2, a0
+; RV64-ZIMOP-NEXT:    addw a0, s0, a0
+; RV64-ZIMOP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    .cfi_restore ra
+; RV64-ZIMOP-NEXT:    .cfi_restore s0
+; RV64-ZIMOP-NEXT:    .cfi_restore s1
+; RV64-ZIMOP-NEXT:    .cfi_restore s2
+; RV64-ZIMOP-NEXT:    addi sp, sp, 32
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZIMOP-NEXT:    ld ra, -8(gp)
+; RV64-ZIMOP-NEXT:    addi gp, gp, -8
+; RV64-ZIMOP-NEXT:    .cfi_restore gp
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f4:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    addi gp, gp, 4
+; RV32-ZCMOP-NEXT:    sw ra, -4(gp)
+; RV32-ZCMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZCMOP-NEXT:    .cfi_offset s0, -8
+; RV32-ZCMOP-NEXT:    .cfi_offset s1, -12
+; RV32-ZCMOP-NEXT:    .cfi_offset s2, -16
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s2, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s1, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s0, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    add s1, s1, s2
+; RV32-ZCMOP-NEXT:    add a0, a0, s0
+; RV32-ZCMOP-NEXT:    add a0, a0, s1
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    .cfi_restore ra
+; RV32-ZCMOP-NEXT:    .cfi_restore s0
+; RV32-ZCMOP-NEXT:    .cfi_restore s1
+; RV32-ZCMOP-NEXT:    .cfi_restore s2
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZCMOP-NEXT:    lw ra, -4(gp)
+; RV32-ZCMOP-NEXT:    addi gp, gp, -4
+; RV32-ZCMOP-NEXT:    .cfi_restore gp
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f4:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    addi gp, gp, 8
+; RV64-ZCMOP-NEXT:    sd ra, -8(gp)
+; RV64-ZCMOP-NEXT:    .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
+; RV64-ZCMOP-NEXT:    addi sp, sp, -32
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 32
+; RV64-ZCMOP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZCMOP-NEXT:    .cfi_offset s0, -16
+; RV64-ZCMOP-NEXT:    .cfi_offset s1, -24
+; RV64-ZCMOP-NEXT:    .cfi_offset s2, -32
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s2, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s1, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s0, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    add s1, s1, s2
+; RV64-ZCMOP-NEXT:    add a0, a0, s0
+; RV64-ZCMOP-NEXT:    addw a0, a0, s1
+; RV64-ZCMOP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    .cfi_restore ra
+; RV64-ZCMOP-NEXT:    .cfi_restore s0
+; RV64-ZCMOP-NEXT:    .cfi_restore s1
+; RV64-ZCMOP-NEXT:    .cfi_restore s2
+; RV64-ZCMOP-NEXT:    addi sp, sp, 32
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZCMOP-NEXT:    ld ra, -8(gp)
+; RV64-ZCMOP-NEXT:    addi gp, gp, -8
+; RV64-ZCMOP-NEXT:    .cfi_restore gp
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f4:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    addi gp, gp, 4
@@ -329,6 +605,58 @@ define i32 @f5() shadowcallstack nounwind {
 ; RV64-NEXT:    addi gp, gp, -8
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f5:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    addi gp, gp, 4
+; RV32-ZIMOP-NEXT:    sw ra, -4(gp)
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    lw ra, -4(gp)
+; RV32-ZIMOP-NEXT:    addi gp, gp, -4
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f5:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    addi gp, gp, 8
+; RV64-ZIMOP-NEXT:    sd ra, -8(gp)
+; RV64-ZIMOP-NEXT:    addi sp, sp, -16
+; RV64-ZIMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    addi sp, sp, 16
+; RV64-ZIMOP-NEXT:    ld ra, -8(gp)
+; RV64-ZIMOP-NEXT:    addi gp, gp, -8
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f5:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    addi gp, gp, 4
+; RV32-ZCMOP-NEXT:    sw ra, -4(gp)
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    lw ra, -4(gp)
+; RV32-ZCMOP-NEXT:    addi gp, gp, -4
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f5:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    addi gp, gp, 8
+; RV64-ZCMOP-NEXT:    sd ra, -8(gp)
+; RV64-ZCMOP-NEXT:    addi sp, sp, -16
+; RV64-ZCMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    addi sp, sp, 16
+; RV64-ZCMOP-NEXT:    ld ra, -8(gp)
+; RV64-ZCMOP-NEXT:    addi gp, gp, -8
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f5:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    addi gp, gp, 4
@@ -368,6 +696,22 @@ define void @f1_hw() "hw-shadow-stack" {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f1_hw:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f1_hw:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f1_hw:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f1_hw:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f1_hw:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    ret
@@ -387,6 +731,22 @@ define void @f2_hw() "hw-shadow-stack" {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    tail foo
 ;
+; RV32-ZIMOP-LABEL: f2_hw:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    tail foo
+;
+; RV64-ZIMOP-LABEL: f2_hw:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    tail foo
+;
+; RV32-ZCMOP-LABEL: f2_hw:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    tail foo
+;
+; RV64-ZCMOP-LABEL: f2_hw:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    tail foo
+;
 ; RV32-ZICFISS-LABEL: f2_hw:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    tail foo
@@ -425,6 +785,66 @@ define i32 @f3_hw() "hw-shadow-stack" {
 ; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f3_hw:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    mop.rr.7 ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    .cfi_restore ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZIMOP-NEXT:    mop.r.28 ra
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f3_hw:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    mop.rr.7 ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, -16
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ZIMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    .cfi_restore ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, 16
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZIMOP-NEXT:    mop.r.28 ra
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f3_hw:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    c.mop.1 ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    .cfi_restore ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZCMOP-NEXT:    mop.r.28 ra
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f3_hw:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    c.mop.1 ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, -16
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ZCMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    .cfi_restore ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, 16
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZCMOP-NEXT:    mop.r.28 ra
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f3_hw:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    sspush ra
@@ -528,6 +948,150 @@ define i32 @f4_hw() "hw-shadow-stack" {
 ; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f4_hw:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    mop.rr.7 ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZIMOP-NEXT:    .cfi_offset s0, -8
+; RV32-ZIMOP-NEXT:    .cfi_offset s1, -12
+; RV32-ZIMOP-NEXT:    .cfi_offset s2, -16
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s0, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s1, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s2, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    add s0, s0, s1
+; RV32-ZIMOP-NEXT:    add a0, s2, a0
+; RV32-ZIMOP-NEXT:    add a0, s0, a0
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    .cfi_restore ra
+; RV32-ZIMOP-NEXT:    .cfi_restore s0
+; RV32-ZIMOP-NEXT:    .cfi_restore s1
+; RV32-ZIMOP-NEXT:    .cfi_restore s2
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZIMOP-NEXT:    mop.r.28 ra
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f4_hw:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    mop.rr.7 ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, -32
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 32
+; RV64-ZIMOP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZIMOP-NEXT:    .cfi_offset s0, -16
+; RV64-ZIMOP-NEXT:    .cfi_offset s1, -24
+; RV64-ZIMOP-NEXT:    .cfi_offset s2, -32
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s0, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s1, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s2, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    add s0, s0, s1
+; RV64-ZIMOP-NEXT:    add a0, s2, a0
+; RV64-ZIMOP-NEXT:    addw a0, s0, a0
+; RV64-ZIMOP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    .cfi_restore ra
+; RV64-ZIMOP-NEXT:    .cfi_restore s0
+; RV64-ZIMOP-NEXT:    .cfi_restore s1
+; RV64-ZIMOP-NEXT:    .cfi_restore s2
+; RV64-ZIMOP-NEXT:    addi sp, sp, 32
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZIMOP-NEXT:    mop.r.28 ra
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f4_hw:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    c.mop.1 ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZCMOP-NEXT:    .cfi_offset s0, -8
+; RV32-ZCMOP-NEXT:    .cfi_offset s1, -12
+; RV32-ZCMOP-NEXT:    .cfi_offset s2, -16
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s2, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s1, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s0, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    add s1, s1, s2
+; RV32-ZCMOP-NEXT:    add a0, a0, s0
+; RV32-ZCMOP-NEXT:    add a0, a0, s1
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    .cfi_restore ra
+; RV32-ZCMOP-NEXT:    .cfi_restore s0
+; RV32-ZCMOP-NEXT:    .cfi_restore s1
+; RV32-ZCMOP-NEXT:    .cfi_restore s2
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZCMOP-NEXT:    mop.r.28 ra
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f4_hw:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    c.mop.1 ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, -32
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 32
+; RV64-ZCMOP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZCMOP-NEXT:    .cfi_offset s0, -16
+; RV64-ZCMOP-NEXT:    .cfi_offset s1, -24
+; RV64-ZCMOP-NEXT:    .cfi_offset s2, -32
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s2, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s1, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s0, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    add s1, s1, s2
+; RV64-ZCMOP-NEXT:    add a0, a0, s0
+; RV64-ZCMOP-NEXT:    addw a0, a0, s1
+; RV64-ZCMOP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    .cfi_restore ra
+; RV64-ZCMOP-NEXT:    .cfi_restore s0
+; RV64-ZCMOP-NEXT:    .cfi_restore s1
+; RV64-ZCMOP-NEXT:    .cfi_restore s2
+; RV64-ZCMOP-NEXT:    addi sp, sp, 32
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZCMOP-NEXT:    mop.r.28 ra
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f4_hw:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    sspush ra
@@ -628,6 +1192,50 @@ define i32 @f5_hw() "hw-shadow-stack" nounwind {
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f5_hw:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    mop.rr.7 ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    mop.r.28 ra
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f5_hw:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    mop.rr.7 ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, -16
+; RV64-ZIMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    addi sp, sp, 16
+; RV64-ZIMOP-NEXT:    mop.r.28 ra
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f5_hw:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    c.mop.1 ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    mop.r.28 ra
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f5_hw:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    c.mop.1 ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, -16
+; RV64-ZCMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    addi sp, sp, 16
+; RV64-ZCMOP-NEXT:    mop.r.28 ra
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f5_hw:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    sspush ra
@@ -663,6 +1271,22 @@ define void @f1_both() "hw-shadow-stack" shadowcallstack {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f1_both:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f1_both:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f1_both:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f1_both:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f1_both:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    ret
@@ -682,6 +1306,22 @@ define void @f2_both() "hw-shadow-stack" shadowcallstack {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    tail foo
 ;
+; RV32-ZIMOP-LABEL: f2_both:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    tail foo
+;
+; RV64-ZIMOP-LABEL: f2_both:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    tail foo
+;
+; RV32-ZCMOP-LABEL: f2_both:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    tail foo
+;
+; RV64-ZCMOP-LABEL: f2_both:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    tail foo
+;
 ; RV32-ZICFISS-LABEL: f2_both:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    tail foo
@@ -732,6 +1372,66 @@ define i32 @f3_both() "hw-shadow-stack" shadowcallstack {
 ; RV64-NEXT:    .cfi_restore gp
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f3_both:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    mop.rr.7 ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    .cfi_restore ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZIMOP-NEXT:    mop.r.28 ra
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f3_both:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    mop.rr.7 ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, -16
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ZIMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    .cfi_restore ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, 16
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZIMOP-NEXT:    mop.r.28 ra
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f3_both:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    c.mop.1 ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    .cfi_restore ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZCMOP-NEXT:    mop.r.28 ra
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f3_both:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    c.mop.1 ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, -16
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ZCMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    .cfi_restore ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, 16
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZCMOP-NEXT:    mop.r.28 ra
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f3_both:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    sspush ra
@@ -847,6 +1547,150 @@ define i32 @f4_both() "hw-shadow-stack" shadowcallstack {
 ; RV64-NEXT:    .cfi_restore gp
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f4_both:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    mop.rr.7 ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZIMOP-NEXT:    .cfi_offset s0, -8
+; RV32-ZIMOP-NEXT:    .cfi_offset s1, -12
+; RV32-ZIMOP-NEXT:    .cfi_offset s2, -16
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s0, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s1, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    mv s2, a0
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    add s0, s0, s1
+; RV32-ZIMOP-NEXT:    add a0, s2, a0
+; RV32-ZIMOP-NEXT:    add a0, s0, a0
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    .cfi_restore ra
+; RV32-ZIMOP-NEXT:    .cfi_restore s0
+; RV32-ZIMOP-NEXT:    .cfi_restore s1
+; RV32-ZIMOP-NEXT:    .cfi_restore s2
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZIMOP-NEXT:    mop.r.28 ra
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f4_both:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    mop.rr.7 ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, -32
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 32
+; RV64-ZIMOP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZIMOP-NEXT:    .cfi_offset s0, -16
+; RV64-ZIMOP-NEXT:    .cfi_offset s1, -24
+; RV64-ZIMOP-NEXT:    .cfi_offset s2, -32
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s0, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s1, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    mv s2, a0
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    add s0, s0, s1
+; RV64-ZIMOP-NEXT:    add a0, s2, a0
+; RV64-ZIMOP-NEXT:    addw a0, s0, a0
+; RV64-ZIMOP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    .cfi_restore ra
+; RV64-ZIMOP-NEXT:    .cfi_restore s0
+; RV64-ZIMOP-NEXT:    .cfi_restore s1
+; RV64-ZIMOP-NEXT:    .cfi_restore s2
+; RV64-ZIMOP-NEXT:    addi sp, sp, 32
+; RV64-ZIMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZIMOP-NEXT:    mop.r.28 ra
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f4_both:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    c.mop.1 ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    .cfi_offset ra, -4
+; RV32-ZCMOP-NEXT:    .cfi_offset s0, -8
+; RV32-ZCMOP-NEXT:    .cfi_offset s1, -12
+; RV32-ZCMOP-NEXT:    .cfi_offset s2, -16
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s2, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s1, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    mv s0, a0
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    add s1, s1, s2
+; RV32-ZCMOP-NEXT:    add a0, a0, s0
+; RV32-ZCMOP-NEXT:    add a0, a0, s1
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    .cfi_restore ra
+; RV32-ZCMOP-NEXT:    .cfi_restore s0
+; RV32-ZCMOP-NEXT:    .cfi_restore s1
+; RV32-ZCMOP-NEXT:    .cfi_restore s2
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV32-ZCMOP-NEXT:    mop.r.28 ra
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f4_both:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    c.mop.1 ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, -32
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 32
+; RV64-ZCMOP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    .cfi_offset ra, -8
+; RV64-ZCMOP-NEXT:    .cfi_offset s0, -16
+; RV64-ZCMOP-NEXT:    .cfi_offset s1, -24
+; RV64-ZCMOP-NEXT:    .cfi_offset s2, -32
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s2, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s1, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    mv s0, a0
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    add s1, s1, s2
+; RV64-ZCMOP-NEXT:    add a0, a0, s0
+; RV64-ZCMOP-NEXT:    addw a0, a0, s1
+; RV64-ZCMOP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    .cfi_restore ra
+; RV64-ZCMOP-NEXT:    .cfi_restore s0
+; RV64-ZCMOP-NEXT:    .cfi_restore s1
+; RV64-ZCMOP-NEXT:    .cfi_restore s2
+; RV64-ZCMOP-NEXT:    addi sp, sp, 32
+; RV64-ZCMOP-NEXT:    .cfi_def_cfa_offset 0
+; RV64-ZCMOP-NEXT:    mop.r.28 ra
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f4_both:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    sspush ra
@@ -955,6 +1799,50 @@ define i32 @f5_both() "hw-shadow-stack" shadowcallstack nounwind {
 ; RV64-NEXT:    addi gp, gp, -8
 ; RV64-NEXT:    ret
 ;
+; RV32-ZIMOP-LABEL: f5_both:
+; RV32-ZIMOP:       # %bb.0:
+; RV32-ZIMOP-NEXT:    mop.rr.7 ra
+; RV32-ZIMOP-NEXT:    addi sp, sp, -16
+; RV32-ZIMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZIMOP-NEXT:    call bar
+; RV32-ZIMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZIMOP-NEXT:    addi sp, sp, 16
+; RV32-ZIMOP-NEXT:    mop.r.28 ra
+; RV32-ZIMOP-NEXT:    ret
+;
+; RV64-ZIMOP-LABEL: f5_both:
+; RV64-ZIMOP:       # %bb.0:
+; RV64-ZIMOP-NEXT:    mop.rr.7 ra
+; RV64-ZIMOP-NEXT:    addi sp, sp, -16
+; RV64-ZIMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZIMOP-NEXT:    call bar
+; RV64-ZIMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZIMOP-NEXT:    addi sp, sp, 16
+; RV64-ZIMOP-NEXT:    mop.r.28 ra
+; RV64-ZIMOP-NEXT:    ret
+;
+; RV32-ZCMOP-LABEL: f5_both:
+; RV32-ZCMOP:       # %bb.0:
+; RV32-ZCMOP-NEXT:    c.mop.1 ra
+; RV32-ZCMOP-NEXT:    addi sp, sp, -16
+; RV32-ZCMOP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZCMOP-NEXT:    call bar
+; RV32-ZCMOP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZCMOP-NEXT:    addi sp, sp, 16
+; RV32-ZCMOP-NEXT:    mop.r.28 ra
+; RV32-ZCMOP-NEXT:    ret
+;
+; RV64-ZCMOP-LABEL: f5_both:
+; RV64-ZCMOP:       # %bb.0:
+; RV64-ZCMOP-NEXT:    c.mop.1 ra
+; RV64-ZCMOP-NEXT:    addi sp, sp, -16
+; RV64-ZCMOP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZCMOP-NEXT:    call bar
+; RV64-ZCMOP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZCMOP-NEXT:    addi sp, sp, 16
+; RV64-ZCMOP-NEXT:    mop.r.28 ra
+; RV64-ZCMOP-NEXT:    ret
+;
 ; RV32-ZICFISS-LABEL: f5_both:
 ; RV32-ZICFISS:       # %bb.0:
 ; RV32-ZICFISS-NEXT:    sspush ra



More information about the llvm-commits mailing list