[llvm] [RISCV] Support PreserveMost calling convention (PR #148214)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 14 20:47:26 PDT 2025


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/148214

From e3a2e57718e4176606580fc6ca8a7b9b0d0ffd5c Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Fri, 11 Jul 2025 18:55:49 +0800
Subject: [PATCH 1/2] [RISCV] Support PreserveMost calling convention

This adds a minimal implementation of the `PreserveMost` calling
convention: the callee preserves registers `x5`-`x31`.

Fixes #148147.
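
As a usage illustration, here is a minimal IR sketch of a cold-path
helper built on this convention (the names are invented for this note
and are not part of the patch):

  ; Sketch only: under preserve_mostcc the callee must preserve x5-x31,
  ; so it saves and restores any of those registers it clobbers. This is
  ; also why the added test spills t0-t6 and a0-a7 around a call to a
  ; standard-CC function.
  define preserve_mostcc void @cold_path_helper(ptr %counter) {
  entry:
    %old = load i32, ptr %counter
    %new = add i32 %old, 1
    store i32 %new, ptr %counter
    ret void
  }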
---
 llvm/docs/LangRef.rst                         |   2 +
 llvm/lib/Target/RISCV/RISCVCallingConv.td     |   2 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   1 +
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp   |   2 +
 .../RISCV/calling-conv-preserve-most.ll       | 165 ++++++++++++++++++
 5 files changed, 172 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index d2a1821efd698..40688d72870fb 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -413,6 +413,8 @@ added in the future:
     - On AArch64 the callee preserve all general purpose registers, except
       X0-X8 and X16-X18. Not allowed with ``nest``.
 
+    - On RISC-V the callee preserves registers x5-x31.
+
     The idea behind this convention is to support calls to runtime functions
     that have a hot path and a cold path. The hot path is usually a small piece
     of code that doesn't use many registers. The cold path might need to call out to
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index cbf039edec273..90e5019019bed 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -93,3 +93,5 @@ def CSR_XLEN_F32_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F32_V_Interrupt,
 // Same as CSR_XLEN_F64_V_Interrupt, but excluding X16-X31.
 def CSR_XLEN_F64_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F64_V_Interrupt,
                                                    (sequence "X%u", 16, 31))>;
+
+def CSR_RT_MostRegs : CalleeSavedRegs<(add (sequence "X%u", 5, 31))>;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 456f3aedbf034..6feb53202b7f2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22205,6 +22205,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   case CallingConv::C:
   case CallingConv::Fast:
   case CallingConv::SPIR_KERNEL:
+  case CallingConv::PreserveMost:
   case CallingConv::GRAAL:
   case CallingConv::RISCV_VectorCall:
 #define CC_VLS_CASE(ABI_VLEN) case CallingConv::RISCV_VLSCall_##ABI_VLEN:
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 540412366026b..44c7ca3587cc1 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -68,6 +68,8 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
     return CSR_NoRegs_SaveList;
+  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
+    return CSR_RT_MostRegs_SaveList;
   if (MF->getFunction().hasFnAttribute("interrupt")) {
     if (Subtarget.hasVInstructions()) {
       if (Subtarget.hasStdExtD())
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
new file mode 100644
index 0000000000000..a0345363601a5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
@@ -0,0 +1,165 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64
+
+; Check that the PreserveMost calling convention works.
+
+declare void @standard_cc_func()
+declare preserve_mostcc void @preserve_mostcc_func()
+
+define preserve_mostcc void @preserve_mostcc1() nounwind {
+; RV32-LABEL: preserve_mostcc1:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    sw t0, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t1, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t2, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a0, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a1, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a2, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a3, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a4, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a5, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a6, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a7, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t3, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t4, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t5, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t6, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT:    call standard_cc_func
+; RV32-NEXT:    lw t0, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t1, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t2, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a1, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a2, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a3, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a6, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a7, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t3, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t4, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t5, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t6, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: preserve_mostcc1:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi sp, sp, -128
+; RV64-NEXT:    sd t0, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t1, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t2, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a1, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a2, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a3, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a4, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a5, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a6, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a7, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t3, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    call standard_cc_func
+; RV64-NEXT:    ld t0, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t1, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t2, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a1, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a2, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a3, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a4, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a5, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a6, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a7, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t3, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t4, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    ret
+entry:
+  call void @standard_cc_func()
+  ret void
+}
+
+define preserve_mostcc void @preserve_mostcc2() nounwind {
+; RV32-LABEL: preserve_mostcc2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    sw t0, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t1, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t2, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a0, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a1, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a2, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a3, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a4, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a5, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a6, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a7, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t3, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t4, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t5, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t6, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT:    call preserve_mostcc_func
+; RV32-NEXT:    lw t0, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t1, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t2, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a1, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a2, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a3, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a6, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a7, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t3, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t4, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t5, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t6, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: preserve_mostcc2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -128
+; RV64-NEXT:    sd t0, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t1, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t2, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a1, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a2, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a3, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a4, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a5, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a6, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd a7, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t3, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    call preserve_mostcc_func
+; RV64-NEXT:    ld t0, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t1, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t2, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a1, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a2, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a3, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a4, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a5, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a6, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a7, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t3, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t4, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    ret
+  call preserve_mostcc void @preserve_mostcc_func()
+  ret void
+}

From a5812f1bc541bdfed4be3cacdb13462d653080e4 Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Tue, 15 Jul 2025 11:46:48 +0800
Subject: [PATCH 2/2] Add E and getCallPreservedMask

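This also handles the RVE ABIs (ilp32e/lp64e, which only have x0-x15)
through a CSR_RT_MostRegs_RVE list, adds X1 (ra) to the callee-saved
set, and teaches getCallPreservedMask about the convention so that
callers of preserve_mostcc functions benefit as well: values that are
live across such a call can stay in temporary/argument registers
instead of being spilled. A rough caller-side sketch (names invented
for illustration), similar in spirit to the updated preserve_mostcc2
checks:

  declare preserve_mostcc void @cold_path_helper(ptr)

  define i32 @hot_path(i32 %x, ptr %state) {
    ; %x can stay in a register across this call; the only register the
    ; caller has to save is ra, which the call instruction itself
    ; clobbers.
    call preserve_mostcc void @cold_path_helper(ptr %state)
    %r = add i32 %x, 1
    ret i32 %r
  }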
---
 llvm/lib/Target/RISCV/RISCVCallingConv.td     |   4 +-
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp   |  12 +-
 .../RISCV/calling-conv-preserve-most.ll       | 314 ++++++++++--------
 3 files changed, 181 insertions(+), 149 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index 90e5019019bed..056f3d826d7cd 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -94,4 +94,6 @@ def CSR_XLEN_F32_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F32_V_Interrupt,
 def CSR_XLEN_F64_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F64_V_Interrupt,
                                                    (sequence "X%u", 16, 31))>;
 
-def CSR_RT_MostRegs : CalleeSavedRegs<(add (sequence "X%u", 5, 31))>;
+def CSR_RT_MostRegs : CalleeSavedRegs<(add X1, (sequence "X%u", 5, 31))>;
+def CSR_RT_MostRegs_RVE : CalleeSavedRegs<(sub CSR_RT_MostRegs,
+                                               (sequence "X%u", 16, 31))>;
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 44c7ca3587cc1..816fd95a55ed0 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVRegisterInfo.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCV.h"
 #include "RISCVSubtarget.h"
 #include "llvm/ADT/SmallSet.h"
@@ -69,7 +70,8 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
     return CSR_NoRegs_SaveList;
   if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
-    return CSR_RT_MostRegs_SaveList;
+    return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
+                                  : CSR_RT_MostRegs_SaveList;
   if (MF->getFunction().hasFnAttribute("interrupt")) {
     if (Subtarget.hasVInstructions()) {
       if (Subtarget.hasStdExtD())
@@ -813,7 +815,13 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
 
   if (CC == CallingConv::GHC)
     return CSR_NoRegs_RegMask;
-  switch (Subtarget.getTargetABI()) {
+  RISCVABI::ABI ABI = Subtarget.getTargetABI();
+  if (CC == CallingConv::PreserveMost) {
+    if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+      return CSR_RT_MostRegs_RVE_RegMask;
+    return CSR_RT_MostRegs_RegMask;
+  }
+  switch (ABI) {
   default:
     llvm_unreachable("Unrecognized ABI");
   case RISCVABI::ABI_ILP32E:
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
index a0345363601a5..dd8ef7ac55892 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32
-; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+e -target-abi ilp32e < %s | FileCheck %s -check-prefix=RV32E
+; RUN: llc -mtriple=riscv64 -mattr=+e -target-abi lp64e < %s | FileCheck %s -check-prefix=RV64E
 
 ; Check that the PreserveMost calling convention works.
 
@@ -8,158 +10,178 @@ declare void @standard_cc_func()
 declare preserve_mostcc void @preserve_mostcc_func()
 
 define preserve_mostcc void @preserve_mostcc1() nounwind {
-; RV32-LABEL: preserve_mostcc1:
-; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi sp, sp, -64
-; RV32-NEXT:    sw t0, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t1, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t2, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a0, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a1, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a2, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a3, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a4, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a5, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a6, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a7, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t3, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t4, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t5, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t6, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    call standard_cc_func
-; RV32-NEXT:    lw t0, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t1, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t2, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a1, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a2, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a3, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a6, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a7, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t3, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t4, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t5, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t6, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 64
-; RV32-NEXT:    ret
+; RV32I-LABEL: preserve_mostcc1:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    addi sp, sp, -64
+; RV32I-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t0, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t1, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t2, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a1, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a2, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a3, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a4, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a6, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a7, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call standard_cc_func
+; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t2, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a1, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a6, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a7, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 64
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: preserve_mostcc1:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi sp, sp, -128
-; RV64-NEXT:    sd t0, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t1, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t2, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a1, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a2, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a3, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a4, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a5, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a6, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a7, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t3, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    call standard_cc_func
-; RV64-NEXT:    ld t0, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t1, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t2, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a1, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a2, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a3, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a4, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a5, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a6, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a7, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t3, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t4, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 128
-; RV64-NEXT:    ret
+; RV64I-LABEL: preserve_mostcc1:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    addi sp, sp, -128
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t0, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t1, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t2, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a0, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a1, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a2, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a3, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a4, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a5, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a6, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a7, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t5, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t6, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call standard_cc_func
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t0, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t1, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t2, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a0, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a1, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a2, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a3, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a4, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a5, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a6, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a7, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t6, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 128
+; RV64I-NEXT:    ret
+;
+; RV32E-LABEL: preserve_mostcc1:
+; RV32E:       # %bb.0: # %entry
+; RV32E-NEXT:    addi sp, sp, -40
+; RV32E-NEXT:    sw ra, 36(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw t0, 32(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw t1, 28(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw t2, 24(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a1, 16(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a2, 12(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a3, 8(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a4, 4(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a5, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    call standard_cc_func
+; RV32E-NEXT:    lw ra, 36(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw t0, 32(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw t1, 28(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw t2, 24(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a1, 16(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a2, 12(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a3, 8(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a4, 4(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a5, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    addi sp, sp, 40
+; RV32E-NEXT:    ret
+;
+; RV64E-LABEL: preserve_mostcc1:
+; RV64E:       # %bb.0: # %entry
+; RV64E-NEXT:    addi sp, sp, -80
+; RV64E-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd t0, 64(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd t1, 56(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd t2, 48(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a1, 32(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a2, 24(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a3, 16(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a4, 8(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a5, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    call standard_cc_func
+; RV64E-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld t0, 64(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld t1, 56(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld t2, 48(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a1, 32(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a2, 24(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a3, 16(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a4, 8(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a5, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    addi sp, sp, 80
+; RV64E-NEXT:    ret
 entry:
   call void @standard_cc_func()
   ret void
 }
 
 define preserve_mostcc void @preserve_mostcc2() nounwind {
-; RV32-LABEL: preserve_mostcc2:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -64
-; RV32-NEXT:    sw t0, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t1, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t2, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a0, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a1, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a2, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a3, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a4, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a5, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a6, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a7, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t3, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t4, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t5, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw t6, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    call preserve_mostcc_func
-; RV32-NEXT:    lw t0, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t1, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t2, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a1, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a2, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a3, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a6, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a7, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t3, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t4, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t5, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw t6, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 64
-; RV32-NEXT:    ret
+; RV32I-LABEL: preserve_mostcc2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call preserve_mostcc_func
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: preserve_mostcc2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call preserve_mostcc_func
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV32E-LABEL: preserve_mostcc2:
+; RV32E:       # %bb.0:
+; RV32E-NEXT:    addi sp, sp, -4
+; RV32E-NEXT:    sw ra, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    call preserve_mostcc_func
+; RV32E-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    addi sp, sp, 4
+; RV32E-NEXT:    ret
 ;
-; RV64-LABEL: preserve_mostcc2:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -128
-; RV64-NEXT:    sd t0, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t1, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t2, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a1, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a2, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a3, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a4, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a5, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a6, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd a7, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t3, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    call preserve_mostcc_func
-; RV64-NEXT:    ld t0, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t1, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t2, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a1, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a2, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a3, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a4, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a5, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a6, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld a7, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t3, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t4, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 128
-; RV64-NEXT:    ret
+; RV64E-LABEL: preserve_mostcc2:
+; RV64E:       # %bb.0:
+; RV64E-NEXT:    addi sp, sp, -8
+; RV64E-NEXT:    sd ra, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    call preserve_mostcc_func
+; RV64E-NEXT:    ld ra, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    addi sp, sp, 8
+; RV64E-NEXT:    ret
   call preserve_mostcc void @preserve_mostcc_func()
   ret void
 }


