[llvm] [RISCV][NFC] Remove nounwind from push/pop tests (PR #125939)
Sam Elliott via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 5 14:56:13 PST 2025
https://github.com/lenary updated https://github.com/llvm/llvm-project/pull/125939
From 2277177aaf37874cae8917fed76f75080871d563 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Wed, 5 Feb 2025 13:42:20 -0800
Subject: [PATCH] [RISCV][NFC] Remove nounwind from push/pop tests
These tests cover frame-handling code that uses push/pop. To increase
coverage of CFI/unwind info, this removes the `nounwind` annotations and
regenerates the checks for these tests.
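For context, a minimal sketch (not taken from the patch; the function name and
exact offsets are illustrative) of the kind of output the regenerated checks
now cover. Without `nounwind`, the RISC-V backend emits `.cfi_*` directives
around the callee-saved spills and restores, so an unwinder can recover the
CFA and the saved registers at any point in the prologue/epilogue:

  ; Clobbering s0 in inline asm forces a callee-saved spill/restore.
  define void @clobber_s0() {
    call void asm sideeffect "", "~{s0}"()
    ret void
  }

  ; Expected shape of the RV32I output once `nounwind` is dropped
  ; (offsets are representative, not copied from the tests):
  ;   addi sp, sp, -16
  ;   .cfi_def_cfa_offset 16
  ;   sw s0, 12(sp)                # 4-byte Folded Spill
  ;   .cfi_offset s0, -4
  ;   ...
  ;   lw s0, 12(sp)                # 4-byte Folded Reload
  ;   .cfi_restore s0
  ;   addi sp, sp, 16
  ;   .cfi_def_cfa_offset 0
  ;   ret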
---
llvm/test/CodeGen/RISCV/callee-saved-gprs.ll | 448 +++++++++-
llvm/test/CodeGen/RISCV/push-pop-popret.ll | 816 +++++++++++++++++-
.../RISCV/rvv/rv32-spill-vector-csr.ll | 21 +-
3 files changed, 1274 insertions(+), 11 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index 90a1ebec5abffe..874cf897470e70 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -37,10 +37,11 @@
; This function tests that RISCVRegisterInfo::getCalleeSavedRegs returns
; something appropriate.
-define void @callee() nounwind {
+define void @callee() {
; RV32I-LABEL: callee:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -80
+; RV32I-NEXT: .cfi_def_cfa_offset 80
; RV32I-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
@@ -54,6 +55,19 @@ define void @callee() nounwind {
; RV32I-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: .cfi_offset s4, -24
+; RV32I-NEXT: .cfi_offset s5, -28
+; RV32I-NEXT: .cfi_offset s6, -32
+; RV32I-NEXT: .cfi_offset s7, -36
+; RV32I-NEXT: .cfi_offset s8, -40
+; RV32I-NEXT: .cfi_offset s9, -44
+; RV32I-NEXT: .cfi_offset s10, -48
+; RV32I-NEXT: .cfi_offset s11, -52
; RV32I-NEXT: lui a7, %hi(var)
; RV32I-NEXT: lw a0, %lo(var)(a7)
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
@@ -145,15 +159,33 @@ define void @callee() nounwind {
; RV32I-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
+; RV32I-NEXT: .cfi_restore s2
+; RV32I-NEXT: .cfi_restore s3
+; RV32I-NEXT: .cfi_restore s4
+; RV32I-NEXT: .cfi_restore s5
+; RV32I-NEXT: .cfi_restore s6
+; RV32I-NEXT: .cfi_restore s7
+; RV32I-NEXT: .cfi_restore s8
+; RV32I-NEXT: .cfi_restore s9
+; RV32I-NEXT: .cfi_restore s10
+; RV32I-NEXT: .cfi_restore s11
; RV32I-NEXT: addi sp, sp, 80
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV32I-ILP32E-LABEL: callee:
; RV32I-ILP32E: # %bb.0:
; RV32I-ILP32E-NEXT: addi sp, sp, -36
+; RV32I-ILP32E-NEXT: .cfi_def_cfa_offset 36
; RV32I-ILP32E-NEXT: sw ra, 32(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s1, 24(sp) # 4-byte Folded Spill
+; RV32I-ILP32E-NEXT: .cfi_offset ra, -4
+; RV32I-ILP32E-NEXT: .cfi_offset s0, -8
+; RV32I-ILP32E-NEXT: .cfi_offset s1, -12
; RV32I-ILP32E-NEXT: lui a7, %hi(var)
; RV32I-ILP32E-NEXT: lw a0, %lo(var)(a7)
; RV32I-ILP32E-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
@@ -235,12 +267,17 @@ define void @callee() nounwind {
; RV32I-ILP32E-NEXT: lw ra, 32(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s0, 28(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s1, 24(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT: .cfi_restore ra
+; RV32I-ILP32E-NEXT: .cfi_restore s0
+; RV32I-ILP32E-NEXT: .cfi_restore s1
; RV32I-ILP32E-NEXT: addi sp, sp, 36
+; RV32I-ILP32E-NEXT: .cfi_def_cfa_offset 0
; RV32I-ILP32E-NEXT: ret
;
; RV32I-WITH-FP-LABEL: callee:
; RV32I-WITH-FP: # %bb.0:
; RV32I-WITH-FP-NEXT: addi sp, sp, -80
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa_offset 80
; RV32I-WITH-FP-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
@@ -254,7 +291,21 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT: .cfi_offset ra, -4
+; RV32I-WITH-FP-NEXT: .cfi_offset s0, -8
+; RV32I-WITH-FP-NEXT: .cfi_offset s1, -12
+; RV32I-WITH-FP-NEXT: .cfi_offset s2, -16
+; RV32I-WITH-FP-NEXT: .cfi_offset s3, -20
+; RV32I-WITH-FP-NEXT: .cfi_offset s4, -24
+; RV32I-WITH-FP-NEXT: .cfi_offset s5, -28
+; RV32I-WITH-FP-NEXT: .cfi_offset s6, -32
+; RV32I-WITH-FP-NEXT: .cfi_offset s7, -36
+; RV32I-WITH-FP-NEXT: .cfi_offset s8, -40
+; RV32I-WITH-FP-NEXT: .cfi_offset s9, -44
+; RV32I-WITH-FP-NEXT: .cfi_offset s10, -48
+; RV32I-WITH-FP-NEXT: .cfi_offset s11, -52
; RV32I-WITH-FP-NEXT: addi s0, sp, 80
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV32I-WITH-FP-NEXT: lui t0, %hi(var)
; RV32I-WITH-FP-NEXT: lw a0, %lo(var)(t0)
; RV32I-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
@@ -335,6 +386,7 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: sw a0, %lo(var+4)(t0)
; RV32I-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: sw a0, %lo(var)(t0)
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa sp, 80
; RV32I-WITH-FP-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
@@ -348,12 +400,40 @@ define void @callee() nounwind {
; RV32I-WITH-FP-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT: .cfi_restore ra
+; RV32I-WITH-FP-NEXT: .cfi_restore s0
+; RV32I-WITH-FP-NEXT: .cfi_restore s1
+; RV32I-WITH-FP-NEXT: .cfi_restore s2
+; RV32I-WITH-FP-NEXT: .cfi_restore s3
+; RV32I-WITH-FP-NEXT: .cfi_restore s4
+; RV32I-WITH-FP-NEXT: .cfi_restore s5
+; RV32I-WITH-FP-NEXT: .cfi_restore s6
+; RV32I-WITH-FP-NEXT: .cfi_restore s7
+; RV32I-WITH-FP-NEXT: .cfi_restore s8
+; RV32I-WITH-FP-NEXT: .cfi_restore s9
+; RV32I-WITH-FP-NEXT: .cfi_restore s10
+; RV32I-WITH-FP-NEXT: .cfi_restore s11
; RV32I-WITH-FP-NEXT: addi sp, sp, 80
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV32I-WITH-FP-NEXT: ret
;
; RV32IZCMP-LABEL: callee:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 96
+; RV32IZCMP-NEXT: .cfi_offset ra, -52
+; RV32IZCMP-NEXT: .cfi_offset s0, -48
+; RV32IZCMP-NEXT: .cfi_offset s1, -44
+; RV32IZCMP-NEXT: .cfi_offset s2, -40
+; RV32IZCMP-NEXT: .cfi_offset s3, -36
+; RV32IZCMP-NEXT: .cfi_offset s4, -32
+; RV32IZCMP-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-NEXT: .cfi_offset s6, -24
+; RV32IZCMP-NEXT: .cfi_offset s7, -20
+; RV32IZCMP-NEXT: .cfi_offset s8, -16
+; RV32IZCMP-NEXT: .cfi_offset s9, -12
+; RV32IZCMP-NEXT: .cfi_offset s10, -8
+; RV32IZCMP-NEXT: .cfi_offset s11, -4
; RV32IZCMP-NEXT: lui t0, %hi(var)
; RV32IZCMP-NEXT: lw a0, %lo(var)(t0)
; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
@@ -437,6 +517,7 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-LABEL: callee:
; RV32IZCMP-WITH-FP: # %bb.0:
; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, -80
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 80
; RV32IZCMP-WITH-FP-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
@@ -450,7 +531,21 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s1, -12
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s2, -16
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s3, -20
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s4, -24
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s6, -32
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s7, -36
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s8, -40
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s9, -44
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s10, -48
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s11, -52
; RV32IZCMP-WITH-FP-NEXT: addi s0, sp, 80
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-WITH-FP-NEXT: lui t1, %hi(var)
; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(t1)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
@@ -531,6 +626,7 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(t1)
; RV32IZCMP-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(t1)
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 80
; RV32IZCMP-WITH-FP-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
@@ -544,12 +640,27 @@ define void @callee() nounwind {
; RV32IZCMP-WITH-FP-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore ra
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s0
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s1
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s2
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s3
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s4
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s5
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s6
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s7
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s8
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s9
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s10
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s11
; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, 80
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-WITH-FP-NEXT: ret
;
; RV64I-LABEL: callee:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -160
+; RV64I-NEXT: .cfi_def_cfa_offset 160
; RV64I-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
@@ -563,6 +674,19 @@ define void @callee() nounwind {
; RV64I-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: .cfi_offset s3, -40
+; RV64I-NEXT: .cfi_offset s4, -48
+; RV64I-NEXT: .cfi_offset s5, -56
+; RV64I-NEXT: .cfi_offset s6, -64
+; RV64I-NEXT: .cfi_offset s7, -72
+; RV64I-NEXT: .cfi_offset s8, -80
+; RV64I-NEXT: .cfi_offset s9, -88
+; RV64I-NEXT: .cfi_offset s10, -96
+; RV64I-NEXT: .cfi_offset s11, -104
; RV64I-NEXT: lui a7, %hi(var)
; RV64I-NEXT: lw a0, %lo(var)(a7)
; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
@@ -654,15 +778,33 @@ define void @callee() nounwind {
; RV64I-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
+; RV64I-NEXT: .cfi_restore s0
+; RV64I-NEXT: .cfi_restore s1
+; RV64I-NEXT: .cfi_restore s2
+; RV64I-NEXT: .cfi_restore s3
+; RV64I-NEXT: .cfi_restore s4
+; RV64I-NEXT: .cfi_restore s5
+; RV64I-NEXT: .cfi_restore s6
+; RV64I-NEXT: .cfi_restore s7
+; RV64I-NEXT: .cfi_restore s8
+; RV64I-NEXT: .cfi_restore s9
+; RV64I-NEXT: .cfi_restore s10
+; RV64I-NEXT: .cfi_restore s11
; RV64I-NEXT: addi sp, sp, 160
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; RV64I-LP64E-LABEL: callee:
; RV64I-LP64E: # %bb.0:
; RV64I-LP64E-NEXT: addi sp, sp, -72
+; RV64I-LP64E-NEXT: .cfi_def_cfa_offset 72
; RV64I-LP64E-NEXT: sd ra, 64(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s0, 56(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s1, 48(sp) # 8-byte Folded Spill
+; RV64I-LP64E-NEXT: .cfi_offset ra, -8
+; RV64I-LP64E-NEXT: .cfi_offset s0, -16
+; RV64I-LP64E-NEXT: .cfi_offset s1, -24
; RV64I-LP64E-NEXT: lui a7, %hi(var)
; RV64I-LP64E-NEXT: lw a0, %lo(var)(a7)
; RV64I-LP64E-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
@@ -744,12 +886,17 @@ define void @callee() nounwind {
; RV64I-LP64E-NEXT: ld ra, 64(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s0, 56(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s1, 48(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT: .cfi_restore ra
+; RV64I-LP64E-NEXT: .cfi_restore s0
+; RV64I-LP64E-NEXT: .cfi_restore s1
; RV64I-LP64E-NEXT: addi sp, sp, 72
+; RV64I-LP64E-NEXT: .cfi_def_cfa_offset 0
; RV64I-LP64E-NEXT: ret
;
; RV64I-WITH-FP-LABEL: callee:
; RV64I-WITH-FP: # %bb.0:
; RV64I-WITH-FP-NEXT: addi sp, sp, -160
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa_offset 160
; RV64I-WITH-FP-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
@@ -763,7 +910,21 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT: .cfi_offset ra, -8
+; RV64I-WITH-FP-NEXT: .cfi_offset s0, -16
+; RV64I-WITH-FP-NEXT: .cfi_offset s1, -24
+; RV64I-WITH-FP-NEXT: .cfi_offset s2, -32
+; RV64I-WITH-FP-NEXT: .cfi_offset s3, -40
+; RV64I-WITH-FP-NEXT: .cfi_offset s4, -48
+; RV64I-WITH-FP-NEXT: .cfi_offset s5, -56
+; RV64I-WITH-FP-NEXT: .cfi_offset s6, -64
+; RV64I-WITH-FP-NEXT: .cfi_offset s7, -72
+; RV64I-WITH-FP-NEXT: .cfi_offset s8, -80
+; RV64I-WITH-FP-NEXT: .cfi_offset s9, -88
+; RV64I-WITH-FP-NEXT: .cfi_offset s10, -96
+; RV64I-WITH-FP-NEXT: .cfi_offset s11, -104
; RV64I-WITH-FP-NEXT: addi s0, sp, 160
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV64I-WITH-FP-NEXT: lui t0, %hi(var)
; RV64I-WITH-FP-NEXT: lw a0, %lo(var)(t0)
; RV64I-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
@@ -844,6 +1005,7 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: sw a0, %lo(var+4)(t0)
; RV64I-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: sw a0, %lo(var)(t0)
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa sp, 160
; RV64I-WITH-FP-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
@@ -857,12 +1019,40 @@ define void @callee() nounwind {
; RV64I-WITH-FP-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT: .cfi_restore ra
+; RV64I-WITH-FP-NEXT: .cfi_restore s0
+; RV64I-WITH-FP-NEXT: .cfi_restore s1
+; RV64I-WITH-FP-NEXT: .cfi_restore s2
+; RV64I-WITH-FP-NEXT: .cfi_restore s3
+; RV64I-WITH-FP-NEXT: .cfi_restore s4
+; RV64I-WITH-FP-NEXT: .cfi_restore s5
+; RV64I-WITH-FP-NEXT: .cfi_restore s6
+; RV64I-WITH-FP-NEXT: .cfi_restore s7
+; RV64I-WITH-FP-NEXT: .cfi_restore s8
+; RV64I-WITH-FP-NEXT: .cfi_restore s9
+; RV64I-WITH-FP-NEXT: .cfi_restore s10
+; RV64I-WITH-FP-NEXT: .cfi_restore s11
; RV64I-WITH-FP-NEXT: addi sp, sp, 160
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV64I-WITH-FP-NEXT: ret
;
; RV64IZCMP-LABEL: callee:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
+; RV64IZCMP-NEXT: .cfi_offset ra, -104
+; RV64IZCMP-NEXT: .cfi_offset s0, -96
+; RV64IZCMP-NEXT: .cfi_offset s1, -88
+; RV64IZCMP-NEXT: .cfi_offset s2, -80
+; RV64IZCMP-NEXT: .cfi_offset s3, -72
+; RV64IZCMP-NEXT: .cfi_offset s4, -64
+; RV64IZCMP-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-NEXT: .cfi_offset s6, -48
+; RV64IZCMP-NEXT: .cfi_offset s7, -40
+; RV64IZCMP-NEXT: .cfi_offset s8, -32
+; RV64IZCMP-NEXT: .cfi_offset s9, -24
+; RV64IZCMP-NEXT: .cfi_offset s10, -16
+; RV64IZCMP-NEXT: .cfi_offset s11, -8
; RV64IZCMP-NEXT: lui t0, %hi(var)
; RV64IZCMP-NEXT: lw a0, %lo(var)(t0)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
@@ -946,6 +1136,7 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-LABEL: callee:
; RV64IZCMP-WITH-FP: # %bb.0:
; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, -160
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 160
; RV64IZCMP-WITH-FP-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
@@ -959,7 +1150,21 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s1, -24
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s2, -32
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s3, -40
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s4, -48
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s6, -64
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s7, -72
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s8, -80
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s9, -88
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s10, -96
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s11, -104
; RV64IZCMP-WITH-FP-NEXT: addi s0, sp, 160
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-WITH-FP-NEXT: lui t1, %hi(var)
; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(t1)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
@@ -1040,6 +1245,7 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(t1)
; RV64IZCMP-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(t1)
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 160
; RV64IZCMP-WITH-FP-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
@@ -1053,7 +1259,21 @@ define void @callee() nounwind {
; RV64IZCMP-WITH-FP-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore ra
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s0
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s1
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s2
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s3
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s4
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s5
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s6
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s7
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s8
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s9
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s10
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s11
; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, 160
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-WITH-FP-NEXT: ret
%val = load [32 x i32], ptr @var
store volatile [32 x i32] %val, ptr @var
@@ -1063,10 +1283,11 @@ define void @callee() nounwind {
; This function tests that RISCVRegisterInfo::getCallPreservedMask returns
; something appropriate.
-define void @caller() nounwind {
+define void @caller() {
; RV32I-LABEL: caller:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -144
+; RV32I-NEXT: .cfi_def_cfa_offset 144
; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
@@ -1080,6 +1301,19 @@ define void @caller() nounwind {
; RV32I-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: .cfi_offset s4, -24
+; RV32I-NEXT: .cfi_offset s5, -28
+; RV32I-NEXT: .cfi_offset s6, -32
+; RV32I-NEXT: .cfi_offset s7, -36
+; RV32I-NEXT: .cfi_offset s8, -40
+; RV32I-NEXT: .cfi_offset s9, -44
+; RV32I-NEXT: .cfi_offset s10, -48
+; RV32I-NEXT: .cfi_offset s11, -52
; RV32I-NEXT: lui s0, %hi(var)
; RV32I-NEXT: lw a0, %lo(var)(s0)
; RV32I-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
@@ -1204,15 +1438,33 @@ define void @caller() nounwind {
; RV32I-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
+; RV32I-NEXT: .cfi_restore s2
+; RV32I-NEXT: .cfi_restore s3
+; RV32I-NEXT: .cfi_restore s4
+; RV32I-NEXT: .cfi_restore s5
+; RV32I-NEXT: .cfi_restore s6
+; RV32I-NEXT: .cfi_restore s7
+; RV32I-NEXT: .cfi_restore s8
+; RV32I-NEXT: .cfi_restore s9
+; RV32I-NEXT: .cfi_restore s10
+; RV32I-NEXT: .cfi_restore s11
; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV32I-ILP32E-LABEL: caller:
; RV32I-ILP32E: # %bb.0:
; RV32I-ILP32E-NEXT: addi sp, sp, -136
+; RV32I-ILP32E-NEXT: .cfi_def_cfa_offset 136
; RV32I-ILP32E-NEXT: sw ra, 132(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s0, 128(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s1, 124(sp) # 4-byte Folded Spill
+; RV32I-ILP32E-NEXT: .cfi_offset ra, -4
+; RV32I-ILP32E-NEXT: .cfi_offset s0, -8
+; RV32I-ILP32E-NEXT: .cfi_offset s1, -12
; RV32I-ILP32E-NEXT: lui a0, %hi(var)
; RV32I-ILP32E-NEXT: lw a1, %lo(var)(a0)
; RV32I-ILP32E-NEXT: sw a1, 120(sp) # 4-byte Folded Spill
@@ -1346,12 +1598,17 @@ define void @caller() nounwind {
; RV32I-ILP32E-NEXT: lw ra, 132(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s0, 128(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s1, 124(sp) # 4-byte Folded Reload
+; RV32I-ILP32E-NEXT: .cfi_restore ra
+; RV32I-ILP32E-NEXT: .cfi_restore s0
+; RV32I-ILP32E-NEXT: .cfi_restore s1
; RV32I-ILP32E-NEXT: addi sp, sp, 136
+; RV32I-ILP32E-NEXT: .cfi_def_cfa_offset 0
; RV32I-ILP32E-NEXT: ret
;
; RV32I-WITH-FP-LABEL: caller:
; RV32I-WITH-FP: # %bb.0:
; RV32I-WITH-FP-NEXT: addi sp, sp, -144
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa_offset 144
; RV32I-WITH-FP-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
@@ -1365,7 +1622,21 @@ define void @caller() nounwind {
; RV32I-WITH-FP-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
; RV32I-WITH-FP-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT: .cfi_offset ra, -4
+; RV32I-WITH-FP-NEXT: .cfi_offset s0, -8
+; RV32I-WITH-FP-NEXT: .cfi_offset s1, -12
+; RV32I-WITH-FP-NEXT: .cfi_offset s2, -16
+; RV32I-WITH-FP-NEXT: .cfi_offset s3, -20
+; RV32I-WITH-FP-NEXT: .cfi_offset s4, -24
+; RV32I-WITH-FP-NEXT: .cfi_offset s5, -28
+; RV32I-WITH-FP-NEXT: .cfi_offset s6, -32
+; RV32I-WITH-FP-NEXT: .cfi_offset s7, -36
+; RV32I-WITH-FP-NEXT: .cfi_offset s8, -40
+; RV32I-WITH-FP-NEXT: .cfi_offset s9, -44
+; RV32I-WITH-FP-NEXT: .cfi_offset s10, -48
+; RV32I-WITH-FP-NEXT: .cfi_offset s11, -52
; RV32I-WITH-FP-NEXT: addi s0, sp, 144
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV32I-WITH-FP-NEXT: lui s1, %hi(var)
; RV32I-WITH-FP-NEXT: lw a0, %lo(var)(s1)
; RV32I-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
@@ -1479,6 +1750,7 @@ define void @caller() nounwind {
; RV32I-WITH-FP-NEXT: sw a0, %lo(var+4)(s1)
; RV32I-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: sw a0, %lo(var)(s1)
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa sp, 144
; RV32I-WITH-FP-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
@@ -1492,13 +1764,42 @@ define void @caller() nounwind {
; RV32I-WITH-FP-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT: .cfi_restore ra
+; RV32I-WITH-FP-NEXT: .cfi_restore s0
+; RV32I-WITH-FP-NEXT: .cfi_restore s1
+; RV32I-WITH-FP-NEXT: .cfi_restore s2
+; RV32I-WITH-FP-NEXT: .cfi_restore s3
+; RV32I-WITH-FP-NEXT: .cfi_restore s4
+; RV32I-WITH-FP-NEXT: .cfi_restore s5
+; RV32I-WITH-FP-NEXT: .cfi_restore s6
+; RV32I-WITH-FP-NEXT: .cfi_restore s7
+; RV32I-WITH-FP-NEXT: .cfi_restore s8
+; RV32I-WITH-FP-NEXT: .cfi_restore s9
+; RV32I-WITH-FP-NEXT: .cfi_restore s10
+; RV32I-WITH-FP-NEXT: .cfi_restore s11
; RV32I-WITH-FP-NEXT: addi sp, sp, 144
+; RV32I-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV32I-WITH-FP-NEXT: ret
;
; RV32IZCMP-LABEL: caller:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -112
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
+; RV32IZCMP-NEXT: .cfi_offset ra, -52
+; RV32IZCMP-NEXT: .cfi_offset s0, -48
+; RV32IZCMP-NEXT: .cfi_offset s1, -44
+; RV32IZCMP-NEXT: .cfi_offset s2, -40
+; RV32IZCMP-NEXT: .cfi_offset s3, -36
+; RV32IZCMP-NEXT: .cfi_offset s4, -32
+; RV32IZCMP-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-NEXT: .cfi_offset s6, -24
+; RV32IZCMP-NEXT: .cfi_offset s7, -20
+; RV32IZCMP-NEXT: .cfi_offset s8, -16
+; RV32IZCMP-NEXT: .cfi_offset s9, -12
+; RV32IZCMP-NEXT: .cfi_offset s10, -8
+; RV32IZCMP-NEXT: .cfi_offset s11, -4
; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 160
; RV32IZCMP-NEXT: lui s0, %hi(var)
; RV32IZCMP-NEXT: lw a0, %lo(var)(s0)
; RV32IZCMP-NEXT: sw a0, 92(sp) # 4-byte Folded Spill
@@ -1611,11 +1912,13 @@ define void @caller() nounwind {
; RV32IZCMP-NEXT: lw a0, 92(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, %lo(var)(s0)
; RV32IZCMP-NEXT: addi sp, sp, 48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 112
;
; RV32IZCMP-WITH-FP-LABEL: caller:
; RV32IZCMP-WITH-FP: # %bb.0:
; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, -144
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 144
; RV32IZCMP-WITH-FP-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
@@ -1629,7 +1932,21 @@ define void @caller() nounwind {
; RV32IZCMP-WITH-FP-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
; RV32IZCMP-WITH-FP-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset ra, -4
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s1, -12
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s2, -16
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s3, -20
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s4, -24
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s6, -32
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s7, -36
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s8, -40
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s9, -44
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s10, -48
+; RV32IZCMP-WITH-FP-NEXT: .cfi_offset s11, -52
; RV32IZCMP-WITH-FP-NEXT: addi s0, sp, 144
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-WITH-FP-NEXT: lui s6, %hi(var)
; RV32IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(s6)
; RV32IZCMP-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
@@ -1743,6 +2060,7 @@ define void @caller() nounwind {
; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(s6)
; RV32IZCMP-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(s6)
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 144
; RV32IZCMP-WITH-FP-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
@@ -1756,12 +2074,27 @@ define void @caller() nounwind {
; RV32IZCMP-WITH-FP-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore ra
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s0
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s1
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s2
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s3
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s4
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s5
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s6
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s7
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s8
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s9
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s10
+; RV32IZCMP-WITH-FP-NEXT: .cfi_restore s11
; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, 144
+; RV32IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-WITH-FP-NEXT: ret
;
; RV64I-LABEL: caller:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -288
+; RV64I-NEXT: .cfi_def_cfa_offset 288
; RV64I-NEXT: sd ra, 280(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 272(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 264(sp) # 8-byte Folded Spill
@@ -1775,6 +2108,19 @@ define void @caller() nounwind {
; RV64I-NEXT: sd s9, 200(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s10, 192(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s11, 184(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: .cfi_offset s3, -40
+; RV64I-NEXT: .cfi_offset s4, -48
+; RV64I-NEXT: .cfi_offset s5, -56
+; RV64I-NEXT: .cfi_offset s6, -64
+; RV64I-NEXT: .cfi_offset s7, -72
+; RV64I-NEXT: .cfi_offset s8, -80
+; RV64I-NEXT: .cfi_offset s9, -88
+; RV64I-NEXT: .cfi_offset s10, -96
+; RV64I-NEXT: .cfi_offset s11, -104
; RV64I-NEXT: lui s0, %hi(var)
; RV64I-NEXT: lw a0, %lo(var)(s0)
; RV64I-NEXT: sd a0, 176(sp) # 8-byte Folded Spill
@@ -1899,15 +2245,33 @@ define void @caller() nounwind {
; RV64I-NEXT: ld s9, 200(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s10, 192(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s11, 184(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
+; RV64I-NEXT: .cfi_restore s0
+; RV64I-NEXT: .cfi_restore s1
+; RV64I-NEXT: .cfi_restore s2
+; RV64I-NEXT: .cfi_restore s3
+; RV64I-NEXT: .cfi_restore s4
+; RV64I-NEXT: .cfi_restore s5
+; RV64I-NEXT: .cfi_restore s6
+; RV64I-NEXT: .cfi_restore s7
+; RV64I-NEXT: .cfi_restore s8
+; RV64I-NEXT: .cfi_restore s9
+; RV64I-NEXT: .cfi_restore s10
+; RV64I-NEXT: .cfi_restore s11
; RV64I-NEXT: addi sp, sp, 288
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; RV64I-LP64E-LABEL: caller:
; RV64I-LP64E: # %bb.0:
; RV64I-LP64E-NEXT: addi sp, sp, -272
+; RV64I-LP64E-NEXT: .cfi_def_cfa_offset 272
; RV64I-LP64E-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s0, 256(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s1, 248(sp) # 8-byte Folded Spill
+; RV64I-LP64E-NEXT: .cfi_offset ra, -8
+; RV64I-LP64E-NEXT: .cfi_offset s0, -16
+; RV64I-LP64E-NEXT: .cfi_offset s1, -24
; RV64I-LP64E-NEXT: lui a0, %hi(var)
; RV64I-LP64E-NEXT: lw a1, %lo(var)(a0)
; RV64I-LP64E-NEXT: sd a1, 240(sp) # 8-byte Folded Spill
@@ -2041,12 +2405,17 @@ define void @caller() nounwind {
; RV64I-LP64E-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s0, 256(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s1, 248(sp) # 8-byte Folded Reload
+; RV64I-LP64E-NEXT: .cfi_restore ra
+; RV64I-LP64E-NEXT: .cfi_restore s0
+; RV64I-LP64E-NEXT: .cfi_restore s1
; RV64I-LP64E-NEXT: addi sp, sp, 272
+; RV64I-LP64E-NEXT: .cfi_def_cfa_offset 0
; RV64I-LP64E-NEXT: ret
;
; RV64I-WITH-FP-LABEL: caller:
; RV64I-WITH-FP: # %bb.0:
; RV64I-WITH-FP-NEXT: addi sp, sp, -288
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa_offset 288
; RV64I-WITH-FP-NEXT: sd ra, 280(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s0, 272(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s1, 264(sp) # 8-byte Folded Spill
@@ -2060,7 +2429,21 @@ define void @caller() nounwind {
; RV64I-WITH-FP-NEXT: sd s9, 200(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s10, 192(sp) # 8-byte Folded Spill
; RV64I-WITH-FP-NEXT: sd s11, 184(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT: .cfi_offset ra, -8
+; RV64I-WITH-FP-NEXT: .cfi_offset s0, -16
+; RV64I-WITH-FP-NEXT: .cfi_offset s1, -24
+; RV64I-WITH-FP-NEXT: .cfi_offset s2, -32
+; RV64I-WITH-FP-NEXT: .cfi_offset s3, -40
+; RV64I-WITH-FP-NEXT: .cfi_offset s4, -48
+; RV64I-WITH-FP-NEXT: .cfi_offset s5, -56
+; RV64I-WITH-FP-NEXT: .cfi_offset s6, -64
+; RV64I-WITH-FP-NEXT: .cfi_offset s7, -72
+; RV64I-WITH-FP-NEXT: .cfi_offset s8, -80
+; RV64I-WITH-FP-NEXT: .cfi_offset s9, -88
+; RV64I-WITH-FP-NEXT: .cfi_offset s10, -96
+; RV64I-WITH-FP-NEXT: .cfi_offset s11, -104
; RV64I-WITH-FP-NEXT: addi s0, sp, 288
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV64I-WITH-FP-NEXT: lui s1, %hi(var)
; RV64I-WITH-FP-NEXT: lw a0, %lo(var)(s1)
; RV64I-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
@@ -2174,6 +2557,7 @@ define void @caller() nounwind {
; RV64I-WITH-FP-NEXT: sw a0, %lo(var+4)(s1)
; RV64I-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: sw a0, %lo(var)(s1)
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa sp, 288
; RV64I-WITH-FP-NEXT: ld ra, 280(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s0, 272(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s1, 264(sp) # 8-byte Folded Reload
@@ -2187,13 +2571,42 @@ define void @caller() nounwind {
; RV64I-WITH-FP-NEXT: ld s9, 200(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s10, 192(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s11, 184(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT: .cfi_restore ra
+; RV64I-WITH-FP-NEXT: .cfi_restore s0
+; RV64I-WITH-FP-NEXT: .cfi_restore s1
+; RV64I-WITH-FP-NEXT: .cfi_restore s2
+; RV64I-WITH-FP-NEXT: .cfi_restore s3
+; RV64I-WITH-FP-NEXT: .cfi_restore s4
+; RV64I-WITH-FP-NEXT: .cfi_restore s5
+; RV64I-WITH-FP-NEXT: .cfi_restore s6
+; RV64I-WITH-FP-NEXT: .cfi_restore s7
+; RV64I-WITH-FP-NEXT: .cfi_restore s8
+; RV64I-WITH-FP-NEXT: .cfi_restore s9
+; RV64I-WITH-FP-NEXT: .cfi_restore s10
+; RV64I-WITH-FP-NEXT: .cfi_restore s11
; RV64I-WITH-FP-NEXT: addi sp, sp, 288
+; RV64I-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV64I-WITH-FP-NEXT: ret
;
; RV64IZCMP-LABEL: caller:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
+; RV64IZCMP-NEXT: .cfi_offset ra, -104
+; RV64IZCMP-NEXT: .cfi_offset s0, -96
+; RV64IZCMP-NEXT: .cfi_offset s1, -88
+; RV64IZCMP-NEXT: .cfi_offset s2, -80
+; RV64IZCMP-NEXT: .cfi_offset s3, -72
+; RV64IZCMP-NEXT: .cfi_offset s4, -64
+; RV64IZCMP-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-NEXT: .cfi_offset s6, -48
+; RV64IZCMP-NEXT: .cfi_offset s7, -40
+; RV64IZCMP-NEXT: .cfi_offset s8, -32
+; RV64IZCMP-NEXT: .cfi_offset s9, -24
+; RV64IZCMP-NEXT: .cfi_offset s10, -16
+; RV64IZCMP-NEXT: .cfi_offset s11, -8
; RV64IZCMP-NEXT: addi sp, sp, -128
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 288
; RV64IZCMP-NEXT: lui s0, %hi(var)
; RV64IZCMP-NEXT: lw a0, %lo(var)(s0)
; RV64IZCMP-NEXT: sd a0, 168(sp) # 8-byte Folded Spill
@@ -2306,11 +2719,13 @@ define void @caller() nounwind {
; RV64IZCMP-NEXT: ld a0, 168(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var)(s0)
; RV64IZCMP-NEXT: addi sp, sp, 128
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160
;
; RV64IZCMP-WITH-FP-LABEL: caller:
; RV64IZCMP-WITH-FP: # %bb.0:
; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, -288
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 288
; RV64IZCMP-WITH-FP-NEXT: sd ra, 280(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s0, 272(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s1, 264(sp) # 8-byte Folded Spill
@@ -2324,7 +2739,21 @@ define void @caller() nounwind {
; RV64IZCMP-WITH-FP-NEXT: sd s9, 200(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s10, 192(sp) # 8-byte Folded Spill
; RV64IZCMP-WITH-FP-NEXT: sd s11, 184(sp) # 8-byte Folded Spill
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset ra, -8
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s1, -24
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s2, -32
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s3, -40
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s4, -48
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s6, -64
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s7, -72
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s8, -80
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s9, -88
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s10, -96
+; RV64IZCMP-WITH-FP-NEXT: .cfi_offset s11, -104
; RV64IZCMP-WITH-FP-NEXT: addi s0, sp, 288
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-WITH-FP-NEXT: lui s6, %hi(var)
; RV64IZCMP-WITH-FP-NEXT: lw a0, %lo(var)(s6)
; RV64IZCMP-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
@@ -2438,6 +2867,7 @@ define void @caller() nounwind {
; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(s6)
; RV64IZCMP-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: sw a0, %lo(var)(s6)
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 288
; RV64IZCMP-WITH-FP-NEXT: ld ra, 280(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s0, 272(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s1, 264(sp) # 8-byte Folded Reload
@@ -2451,7 +2881,21 @@ define void @caller() nounwind {
; RV64IZCMP-WITH-FP-NEXT: ld s9, 200(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s10, 192(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s11, 184(sp) # 8-byte Folded Reload
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore ra
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s0
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s1
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s2
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s3
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s4
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s5
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s6
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s7
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s8
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s9
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s10
+; RV64IZCMP-WITH-FP-NEXT: .cfi_restore s11
; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, 288
+; RV64IZCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-WITH-FP-NEXT: ret
%val = load [32 x i32], ptr @var
call void @callee()
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 865d55a4dcb95a..5a3b67adfaab11 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -104,7 +104,7 @@ define i32 @foo() {
ret i32 0
}
-define i32 @pushpopret0(i32 signext %size){
+define i32 @pushpopret0(i32 signext %size) {
; RV32IZCMP-LABEL: pushpopret0:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
@@ -1151,10 +1151,11 @@ entry:
declare void @llvm.va_start(ptr)
declare void @llvm.va_end(ptr)
-define i32 @varargs(ptr %fmt, ...) nounwind {
+define i32 @varargs(ptr %fmt, ...) {
; RV32IZCMP-LABEL: varargs:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 48
; RV32IZCMP-NEXT: mv a0, a1
; RV32IZCMP-NEXT: sw a5, 36(sp)
; RV32IZCMP-NEXT: sw a6, 40(sp)
@@ -1166,11 +1167,13 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
; RV32IZCMP-NEXT: addi a1, sp, 24
; RV32IZCMP-NEXT: sw a1, 12(sp)
; RV32IZCMP-NEXT: addi sp, sp, 48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: ret
;
; RV64IZCMP-LABEL: varargs:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: addi sp, sp, -80
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 80
; RV64IZCMP-NEXT: sd a1, 24(sp)
; RV64IZCMP-NEXT: addi a0, sp, 28
; RV64IZCMP-NEXT: sd a0, 8(sp)
@@ -1182,11 +1185,13 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
; RV64IZCMP-NEXT: sd a3, 40(sp)
; RV64IZCMP-NEXT: sd a4, 48(sp)
; RV64IZCMP-NEXT: addi sp, sp, 80
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: ret
;
; RV32IZCMP-SR-LABEL: varargs:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: addi sp, sp, -48
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV32IZCMP-SR-NEXT: mv a0, a1
; RV32IZCMP-SR-NEXT: sw a5, 36(sp)
; RV32IZCMP-SR-NEXT: sw a6, 40(sp)
@@ -1198,11 +1203,13 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
; RV32IZCMP-SR-NEXT: addi a1, sp, 24
; RV32IZCMP-SR-NEXT: sw a1, 12(sp)
; RV32IZCMP-SR-NEXT: addi sp, sp, 48
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-SR-NEXT: ret
;
; RV64IZCMP-SR-LABEL: varargs:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: addi sp, sp, -80
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
; RV64IZCMP-SR-NEXT: sd a1, 24(sp)
; RV64IZCMP-SR-NEXT: addi a0, sp, 28
; RV64IZCMP-SR-NEXT: sd a0, 8(sp)
@@ -1214,11 +1221,13 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
; RV64IZCMP-SR-NEXT: sd a3, 40(sp)
; RV64IZCMP-SR-NEXT: sd a4, 48(sp)
; RV64IZCMP-SR-NEXT: addi sp, sp, 80
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-SR-NEXT: ret
;
; RV32I-LABEL: varargs:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -48
+; RV32I-NEXT: .cfi_def_cfa_offset 48
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: sw a5, 36(sp)
; RV32I-NEXT: sw a6, 40(sp)
@@ -1230,11 +1239,13 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
; RV32I-NEXT: addi a1, sp, 24
; RV32I-NEXT: sw a1, 12(sp)
; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: varargs:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -80
+; RV64I-NEXT: .cfi_def_cfa_offset 80
; RV64I-NEXT: sd a1, 24(sp)
; RV64I-NEXT: addi a0, sp, 28
; RV64I-NEXT: sd a0, 8(sp)
@@ -1246,6 +1257,7 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
; RV64I-NEXT: sd a3, 40(sp)
; RV64I-NEXT: sd a4, 48(sp)
; RV64I-NEXT: addi sp, sp, 80
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
@@ -1259,10 +1271,16 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
@var0 = global [18 x i32] zeroinitializer
-define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
+define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) {
; RV32IZCMP-LABEL: many_args:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0-s4}, -32
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 32
+; RV32IZCMP-NEXT: .cfi_offset s0, -20
+; RV32IZCMP-NEXT: .cfi_offset s1, -16
+; RV32IZCMP-NEXT: .cfi_offset s2, -12
+; RV32IZCMP-NEXT: .cfi_offset s3, -8
+; RV32IZCMP-NEXT: .cfi_offset s4, -4
; RV32IZCMP-NEXT: lui a0, %hi(var0)
; RV32IZCMP-NEXT: lw a6, %lo(var0)(a0)
; RV32IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
@@ -1306,6 +1324,12 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV64IZCMP-LABEL: many_args:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0-s4}, -48
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 48
+; RV64IZCMP-NEXT: .cfi_offset s0, -40
+; RV64IZCMP-NEXT: .cfi_offset s1, -32
+; RV64IZCMP-NEXT: .cfi_offset s2, -24
+; RV64IZCMP-NEXT: .cfi_offset s3, -16
+; RV64IZCMP-NEXT: .cfi_offset s4, -8
; RV64IZCMP-NEXT: lui a0, %hi(var0)
; RV64IZCMP-NEXT: lw a6, %lo(var0)(a0)
; RV64IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
@@ -1349,6 +1373,12 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV32IZCMP-SR-LABEL: many_args:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s4}, -32
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -20
+; RV32IZCMP-SR-NEXT: .cfi_offset s1, -16
+; RV32IZCMP-SR-NEXT: .cfi_offset s2, -12
+; RV32IZCMP-SR-NEXT: .cfi_offset s3, -8
+; RV32IZCMP-SR-NEXT: .cfi_offset s4, -4
; RV32IZCMP-SR-NEXT: lui a0, %hi(var0)
; RV32IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
; RV32IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
@@ -1392,6 +1422,12 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV64IZCMP-SR-LABEL: many_args:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s4}, -48
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -40
+; RV64IZCMP-SR-NEXT: .cfi_offset s1, -32
+; RV64IZCMP-SR-NEXT: .cfi_offset s2, -24
+; RV64IZCMP-SR-NEXT: .cfi_offset s3, -16
+; RV64IZCMP-SR-NEXT: .cfi_offset s4, -8
; RV64IZCMP-SR-NEXT: lui a0, %hi(var0)
; RV64IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
; RV64IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
@@ -1435,11 +1471,17 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV32I-LABEL: many_args:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: .cfi_def_cfa_offset 32
; RV32I-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset s0, -4
+; RV32I-NEXT: .cfi_offset s1, -8
+; RV32I-NEXT: .cfi_offset s2, -12
+; RV32I-NEXT: .cfi_offset s3, -16
+; RV32I-NEXT: .cfi_offset s4, -20
; RV32I-NEXT: lui a0, %hi(var0)
; RV32I-NEXT: lw a1, %lo(var0)(a0)
; RV32I-NEXT: lw a2, %lo(var0+4)(a0)
@@ -1483,17 +1525,29 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV32I-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
+; RV32I-NEXT: .cfi_restore s2
+; RV32I-NEXT: .cfi_restore s3
+; RV32I-NEXT: .cfi_restore s4
; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: many_args:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: .cfi_def_cfa_offset 48
; RV64I-NEXT: sd s0, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset s0, -8
+; RV64I-NEXT: .cfi_offset s1, -16
+; RV64I-NEXT: .cfi_offset s2, -24
+; RV64I-NEXT: .cfi_offset s3, -32
+; RV64I-NEXT: .cfi_offset s4, -40
; RV64I-NEXT: lui a0, %hi(var0)
; RV64I-NEXT: lw a1, %lo(var0)(a0)
; RV64I-NEXT: lw a2, %lo(var0+4)(a0)
@@ -1537,7 +1591,13 @@ define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
; RV64I-NEXT: ld s2, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s4, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore s0
+; RV64I-NEXT: .cfi_restore s1
+; RV64I-NEXT: .cfi_restore s2
+; RV64I-NEXT: .cfi_restore s3
+; RV64I-NEXT: .cfi_restore s4
; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
entry:
%val = load [18 x i32], ptr @var0
@@ -1551,11 +1611,16 @@ declare ptr @llvm.stacksave()
declare void @llvm.stackrestore(ptr)
declare void @notdead(ptr)
-define void @alloca(i32 %n) nounwind {
+define void @alloca(i32 %n) {
; RV32IZCMP-LABEL: alloca:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: .cfi_offset ra, -12
+; RV32IZCMP-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-NEXT: .cfi_offset s1, -4
; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: mv s1, sp
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
@@ -1564,12 +1629,18 @@ define void @alloca(i32 %n) nounwind {
; RV32IZCMP-NEXT: call notdead
; RV32IZCMP-NEXT: mv sp, s1
; RV32IZCMP-NEXT: addi sp, s0, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 16
;
; RV64IZCMP-LABEL: alloca:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -32
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 32
+; RV64IZCMP-NEXT: .cfi_offset ra, -24
+; RV64IZCMP-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-NEXT: .cfi_offset s1, -8
; RV64IZCMP-NEXT: addi s0, sp, 32
+; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: mv s1, sp
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
@@ -1580,12 +1651,18 @@ define void @alloca(i32 %n) nounwind {
; RV64IZCMP-NEXT: call notdead
; RV64IZCMP-NEXT: mv sp, s1
; RV64IZCMP-NEXT: addi sp, s0, -32
+; RV64IZCMP-NEXT: .cfi_def_cfa sp, 32
; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV32IZCMP-SR-LABEL: alloca:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -12
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
+; RV32IZCMP-SR-NEXT: .cfi_offset s1, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: mv s1, sp
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
@@ -1594,12 +1671,18 @@ define void @alloca(i32 %n) nounwind {
; RV32IZCMP-SR-NEXT: call notdead
; RV32IZCMP-SR-NEXT: mv sp, s1
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 16
;
; RV64IZCMP-SR-LABEL: alloca:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -24
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
+; RV64IZCMP-SR-NEXT: .cfi_offset s1, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 32
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: mv s1, sp
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
@@ -1610,15 +1693,21 @@ define void @alloca(i32 %n) nounwind {
; RV64IZCMP-SR-NEXT: call notdead
; RV64IZCMP-SR-NEXT: mv sp, s1
; RV64IZCMP-SR-NEXT: addi sp, s0, -32
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 32
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV32I-LABEL: alloca:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: mv s1, sp
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
@@ -1627,19 +1716,29 @@ define void @alloca(i32 %n) nounwind {
; RV32I-NEXT: call notdead
; RV32I-NEXT: mv sp, s1
; RV32I-NEXT: addi sp, s0, -16
+; RV32I-NEXT: .cfi_def_cfa sp, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: alloca:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: .cfi_def_cfa_offset 32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
; RV64I-NEXT: addi s0, sp, 32
+; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: mv s1, sp
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
@@ -1650,10 +1749,15 @@ define void @alloca(i32 %n) nounwind {
; RV64I-NEXT: call notdead
; RV64I-NEXT: mv sp, s1
; RV64I-NEXT: addi sp, s0, -32
+; RV64I-NEXT: .cfi_def_cfa sp, 32
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
+; RV64I-NEXT: .cfi_restore s0
+; RV64I-NEXT: .cfi_restore s1
; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
%sp = call ptr @llvm.stacksave()
%addr = alloca i8, i32 %n
@@ -1665,11 +1769,14 @@ define void @alloca(i32 %n) nounwind {
declare i32 @foo_test_irq(...)
@var_test_irq = global [32 x i32] zeroinitializer
-define void @foo_with_irq() nounwind "interrupt"="user" {
+define void @foo_with_irq() "interrupt"="user" {
; RV32IZCMP-LABEL: foo_with_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra}, -64
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: addi sp, sp, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80
; RV32IZCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
@@ -1685,6 +1792,21 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: .cfi_offset t0, -20
+; RV32IZCMP-NEXT: .cfi_offset t1, -24
+; RV32IZCMP-NEXT: .cfi_offset t2, -28
+; RV32IZCMP-NEXT: .cfi_offset a0, -32
+; RV32IZCMP-NEXT: .cfi_offset a1, -36
+; RV32IZCMP-NEXT: .cfi_offset a2, -40
+; RV32IZCMP-NEXT: .cfi_offset a3, -44
+; RV32IZCMP-NEXT: .cfi_offset a4, -48
+; RV32IZCMP-NEXT: .cfi_offset a5, -52
+; RV32IZCMP-NEXT: .cfi_offset a6, -56
+; RV32IZCMP-NEXT: .cfi_offset a7, -60
+; RV32IZCMP-NEXT: .cfi_offset t3, -64
+; RV32IZCMP-NEXT: .cfi_offset t4, -68
+; RV32IZCMP-NEXT: .cfi_offset t5, -72
+; RV32IZCMP-NEXT: .cfi_offset t6, -76
; RV32IZCMP-NEXT: call foo_test_irq
; RV32IZCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
@@ -1701,14 +1823,35 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: .cfi_restore t0
+; RV32IZCMP-NEXT: .cfi_restore t1
+; RV32IZCMP-NEXT: .cfi_restore t2
+; RV32IZCMP-NEXT: .cfi_restore a0
+; RV32IZCMP-NEXT: .cfi_restore a1
+; RV32IZCMP-NEXT: .cfi_restore a2
+; RV32IZCMP-NEXT: .cfi_restore a3
+; RV32IZCMP-NEXT: .cfi_restore a4
+; RV32IZCMP-NEXT: .cfi_restore a5
+; RV32IZCMP-NEXT: .cfi_restore a6
+; RV32IZCMP-NEXT: .cfi_restore a7
+; RV32IZCMP-NEXT: .cfi_restore t3
+; RV32IZCMP-NEXT: .cfi_restore t4
+; RV32IZCMP-NEXT: .cfi_restore t5
+; RV32IZCMP-NEXT: .cfi_restore t6
; RV32IZCMP-NEXT: addi sp, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-NEXT: cm.pop {ra}, 64
+; RV32IZCMP-NEXT: .cfi_restore ra
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: mret
;
; RV64IZCMP-LABEL: foo_with_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra}, -64
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: addi sp, sp, -80
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 144
; RV64IZCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
@@ -1724,6 +1867,21 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: .cfi_offset t0, -24
+; RV64IZCMP-NEXT: .cfi_offset t1, -32
+; RV64IZCMP-NEXT: .cfi_offset t2, -40
+; RV64IZCMP-NEXT: .cfi_offset a0, -48
+; RV64IZCMP-NEXT: .cfi_offset a1, -56
+; RV64IZCMP-NEXT: .cfi_offset a2, -64
+; RV64IZCMP-NEXT: .cfi_offset a3, -72
+; RV64IZCMP-NEXT: .cfi_offset a4, -80
+; RV64IZCMP-NEXT: .cfi_offset a5, -88
+; RV64IZCMP-NEXT: .cfi_offset a6, -96
+; RV64IZCMP-NEXT: .cfi_offset a7, -104
+; RV64IZCMP-NEXT: .cfi_offset t3, -112
+; RV64IZCMP-NEXT: .cfi_offset t4, -120
+; RV64IZCMP-NEXT: .cfi_offset t5, -128
+; RV64IZCMP-NEXT: .cfi_offset t6, -136
; RV64IZCMP-NEXT: call foo_test_irq
; RV64IZCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
@@ -1740,14 +1898,35 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: .cfi_restore t0
+; RV64IZCMP-NEXT: .cfi_restore t1
+; RV64IZCMP-NEXT: .cfi_restore t2
+; RV64IZCMP-NEXT: .cfi_restore a0
+; RV64IZCMP-NEXT: .cfi_restore a1
+; RV64IZCMP-NEXT: .cfi_restore a2
+; RV64IZCMP-NEXT: .cfi_restore a3
+; RV64IZCMP-NEXT: .cfi_restore a4
+; RV64IZCMP-NEXT: .cfi_restore a5
+; RV64IZCMP-NEXT: .cfi_restore a6
+; RV64IZCMP-NEXT: .cfi_restore a7
+; RV64IZCMP-NEXT: .cfi_restore t3
+; RV64IZCMP-NEXT: .cfi_restore t4
+; RV64IZCMP-NEXT: .cfi_restore t5
+; RV64IZCMP-NEXT: .cfi_restore t6
; RV64IZCMP-NEXT: addi sp, sp, 80
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-NEXT: cm.pop {ra}, 64
+; RV64IZCMP-NEXT: .cfi_restore ra
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: mret
;
; RV32IZCMP-SR-LABEL: foo_with_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra}, -64
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IZCMP-SR-NEXT: addi sp, sp, -16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
; RV32IZCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
@@ -1763,6 +1942,21 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: .cfi_offset t0, -20
+; RV32IZCMP-SR-NEXT: .cfi_offset t1, -24
+; RV32IZCMP-SR-NEXT: .cfi_offset t2, -28
+; RV32IZCMP-SR-NEXT: .cfi_offset a0, -32
+; RV32IZCMP-SR-NEXT: .cfi_offset a1, -36
+; RV32IZCMP-SR-NEXT: .cfi_offset a2, -40
+; RV32IZCMP-SR-NEXT: .cfi_offset a3, -44
+; RV32IZCMP-SR-NEXT: .cfi_offset a4, -48
+; RV32IZCMP-SR-NEXT: .cfi_offset a5, -52
+; RV32IZCMP-SR-NEXT: .cfi_offset a6, -56
+; RV32IZCMP-SR-NEXT: .cfi_offset a7, -60
+; RV32IZCMP-SR-NEXT: .cfi_offset t3, -64
+; RV32IZCMP-SR-NEXT: .cfi_offset t4, -68
+; RV32IZCMP-SR-NEXT: .cfi_offset t5, -72
+; RV32IZCMP-SR-NEXT: .cfi_offset t6, -76
; RV32IZCMP-SR-NEXT: call foo_test_irq
; RV32IZCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
@@ -1779,14 +1973,35 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: .cfi_restore t0
+; RV32IZCMP-SR-NEXT: .cfi_restore t1
+; RV32IZCMP-SR-NEXT: .cfi_restore t2
+; RV32IZCMP-SR-NEXT: .cfi_restore a0
+; RV32IZCMP-SR-NEXT: .cfi_restore a1
+; RV32IZCMP-SR-NEXT: .cfi_restore a2
+; RV32IZCMP-SR-NEXT: .cfi_restore a3
+; RV32IZCMP-SR-NEXT: .cfi_restore a4
+; RV32IZCMP-SR-NEXT: .cfi_restore a5
+; RV32IZCMP-SR-NEXT: .cfi_restore a6
+; RV32IZCMP-SR-NEXT: .cfi_restore a7
+; RV32IZCMP-SR-NEXT: .cfi_restore t3
+; RV32IZCMP-SR-NEXT: .cfi_restore t4
+; RV32IZCMP-SR-NEXT: .cfi_restore t5
+; RV32IZCMP-SR-NEXT: .cfi_restore t6
; RV32IZCMP-SR-NEXT: addi sp, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-SR-NEXT: cm.pop {ra}, 64
+; RV32IZCMP-SR-NEXT: .cfi_restore ra
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-SR-NEXT: mret
;
; RV64IZCMP-SR-LABEL: foo_with_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra}, -64
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IZCMP-SR-NEXT: addi sp, sp, -80
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 144
; RV64IZCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
@@ -1802,6 +2017,21 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: .cfi_offset t0, -24
+; RV64IZCMP-SR-NEXT: .cfi_offset t1, -32
+; RV64IZCMP-SR-NEXT: .cfi_offset t2, -40
+; RV64IZCMP-SR-NEXT: .cfi_offset a0, -48
+; RV64IZCMP-SR-NEXT: .cfi_offset a1, -56
+; RV64IZCMP-SR-NEXT: .cfi_offset a2, -64
+; RV64IZCMP-SR-NEXT: .cfi_offset a3, -72
+; RV64IZCMP-SR-NEXT: .cfi_offset a4, -80
+; RV64IZCMP-SR-NEXT: .cfi_offset a5, -88
+; RV64IZCMP-SR-NEXT: .cfi_offset a6, -96
+; RV64IZCMP-SR-NEXT: .cfi_offset a7, -104
+; RV64IZCMP-SR-NEXT: .cfi_offset t3, -112
+; RV64IZCMP-SR-NEXT: .cfi_offset t4, -120
+; RV64IZCMP-SR-NEXT: .cfi_offset t5, -128
+; RV64IZCMP-SR-NEXT: .cfi_offset t6, -136
; RV64IZCMP-SR-NEXT: call foo_test_irq
; RV64IZCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
@@ -1818,13 +2048,32 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: .cfi_restore t0
+; RV64IZCMP-SR-NEXT: .cfi_restore t1
+; RV64IZCMP-SR-NEXT: .cfi_restore t2
+; RV64IZCMP-SR-NEXT: .cfi_restore a0
+; RV64IZCMP-SR-NEXT: .cfi_restore a1
+; RV64IZCMP-SR-NEXT: .cfi_restore a2
+; RV64IZCMP-SR-NEXT: .cfi_restore a3
+; RV64IZCMP-SR-NEXT: .cfi_restore a4
+; RV64IZCMP-SR-NEXT: .cfi_restore a5
+; RV64IZCMP-SR-NEXT: .cfi_restore a6
+; RV64IZCMP-SR-NEXT: .cfi_restore a7
+; RV64IZCMP-SR-NEXT: .cfi_restore t3
+; RV64IZCMP-SR-NEXT: .cfi_restore t4
+; RV64IZCMP-SR-NEXT: .cfi_restore t5
+; RV64IZCMP-SR-NEXT: .cfi_restore t6
; RV64IZCMP-SR-NEXT: addi sp, sp, 80
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-SR-NEXT: cm.pop {ra}, 64
+; RV64IZCMP-SR-NEXT: .cfi_restore ra
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-SR-NEXT: mret
;
; RV32I-LABEL: foo_with_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: .cfi_def_cfa_offset 64
; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
@@ -1841,6 +2090,22 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset t0, -8
+; RV32I-NEXT: .cfi_offset t1, -12
+; RV32I-NEXT: .cfi_offset t2, -16
+; RV32I-NEXT: .cfi_offset a0, -20
+; RV32I-NEXT: .cfi_offset a1, -24
+; RV32I-NEXT: .cfi_offset a2, -28
+; RV32I-NEXT: .cfi_offset a3, -32
+; RV32I-NEXT: .cfi_offset a4, -36
+; RV32I-NEXT: .cfi_offset a5, -40
+; RV32I-NEXT: .cfi_offset a6, -44
+; RV32I-NEXT: .cfi_offset a7, -48
+; RV32I-NEXT: .cfi_offset t3, -52
+; RV32I-NEXT: .cfi_offset t4, -56
+; RV32I-NEXT: .cfi_offset t5, -60
+; RV32I-NEXT: .cfi_offset t6, -64
; RV32I-NEXT: call foo_test_irq
; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
@@ -1858,12 +2123,30 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore t0
+; RV32I-NEXT: .cfi_restore t1
+; RV32I-NEXT: .cfi_restore t2
+; RV32I-NEXT: .cfi_restore a0
+; RV32I-NEXT: .cfi_restore a1
+; RV32I-NEXT: .cfi_restore a2
+; RV32I-NEXT: .cfi_restore a3
+; RV32I-NEXT: .cfi_restore a4
+; RV32I-NEXT: .cfi_restore a5
+; RV32I-NEXT: .cfi_restore a6
+; RV32I-NEXT: .cfi_restore a7
+; RV32I-NEXT: .cfi_restore t3
+; RV32I-NEXT: .cfi_restore t4
+; RV32I-NEXT: .cfi_restore t5
+; RV32I-NEXT: .cfi_restore t6
; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: mret
;
; RV64I-LABEL: foo_with_irq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -128
+; RV64I-NEXT: .cfi_def_cfa_offset 128
; RV64I-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
@@ -1880,6 +2163,22 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset t0, -16
+; RV64I-NEXT: .cfi_offset t1, -24
+; RV64I-NEXT: .cfi_offset t2, -32
+; RV64I-NEXT: .cfi_offset a0, -40
+; RV64I-NEXT: .cfi_offset a1, -48
+; RV64I-NEXT: .cfi_offset a2, -56
+; RV64I-NEXT: .cfi_offset a3, -64
+; RV64I-NEXT: .cfi_offset a4, -72
+; RV64I-NEXT: .cfi_offset a5, -80
+; RV64I-NEXT: .cfi_offset a6, -88
+; RV64I-NEXT: .cfi_offset a7, -96
+; RV64I-NEXT: .cfi_offset t3, -104
+; RV64I-NEXT: .cfi_offset t4, -112
+; RV64I-NEXT: .cfi_offset t5, -120
+; RV64I-NEXT: .cfi_offset t6, -128
; RV64I-NEXT: call foo_test_irq
; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
@@ -1897,63 +2196,111 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
+; RV64I-NEXT: .cfi_restore t0
+; RV64I-NEXT: .cfi_restore t1
+; RV64I-NEXT: .cfi_restore t2
+; RV64I-NEXT: .cfi_restore a0
+; RV64I-NEXT: .cfi_restore a1
+; RV64I-NEXT: .cfi_restore a2
+; RV64I-NEXT: .cfi_restore a3
+; RV64I-NEXT: .cfi_restore a4
+; RV64I-NEXT: .cfi_restore a5
+; RV64I-NEXT: .cfi_restore a6
+; RV64I-NEXT: .cfi_restore a7
+; RV64I-NEXT: .cfi_restore t3
+; RV64I-NEXT: .cfi_restore t4
+; RV64I-NEXT: .cfi_restore t5
+; RV64I-NEXT: .cfi_restore t6
; RV64I-NEXT: addi sp, sp, 128
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: mret
%call = call i32 @foo_test_irq()
ret void
}
-define void @foo_no_irq() nounwind{
+define void @foo_no_irq() {
; RV32IZCMP-LABEL: foo_no_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra}, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: call foo_test_irq
; RV32IZCMP-NEXT: cm.popret {ra}, 16
;
; RV64IZCMP-LABEL: foo_no_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra}, -16
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: call foo_test_irq
; RV64IZCMP-NEXT: cm.popret {ra}, 16
;
; RV32IZCMP-SR-LABEL: foo_no_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra}, -16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IZCMP-SR-NEXT: call foo_test_irq
; RV32IZCMP-SR-NEXT: cm.popret {ra}, 16
;
; RV64IZCMP-SR-LABEL: foo_no_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra}, -16
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IZCMP-SR-NEXT: call foo_test_irq
; RV64IZCMP-SR-NEXT: cm.popret {ra}, 16
;
; RV32I-LABEL: foo_no_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: call foo_test_irq
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: foo_no_irq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: call foo_test_irq
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
%call = call i32 @foo_test_irq()
ret void
}
-define void @callee_with_irq() nounwind "interrupt"="user" {
+define void @callee_with_irq() "interrupt"="user" {
; RV32IZCMP-LABEL: callee_with_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -112
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
+; RV32IZCMP-NEXT: .cfi_offset ra, -52
+; RV32IZCMP-NEXT: .cfi_offset s0, -48
+; RV32IZCMP-NEXT: .cfi_offset s1, -44
+; RV32IZCMP-NEXT: .cfi_offset s2, -40
+; RV32IZCMP-NEXT: .cfi_offset s3, -36
+; RV32IZCMP-NEXT: .cfi_offset s4, -32
+; RV32IZCMP-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-NEXT: .cfi_offset s6, -24
+; RV32IZCMP-NEXT: .cfi_offset s7, -20
+; RV32IZCMP-NEXT: .cfi_offset s8, -16
+; RV32IZCMP-NEXT: .cfi_offset s9, -12
+; RV32IZCMP-NEXT: .cfi_offset s10, -8
+; RV32IZCMP-NEXT: .cfi_offset s11, -4
; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 160
; RV32IZCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
@@ -1969,6 +2316,21 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: .cfi_offset t0, -68
+; RV32IZCMP-NEXT: .cfi_offset t1, -72
+; RV32IZCMP-NEXT: .cfi_offset t2, -76
+; RV32IZCMP-NEXT: .cfi_offset a0, -80
+; RV32IZCMP-NEXT: .cfi_offset a1, -84
+; RV32IZCMP-NEXT: .cfi_offset a2, -88
+; RV32IZCMP-NEXT: .cfi_offset a3, -92
+; RV32IZCMP-NEXT: .cfi_offset a4, -96
+; RV32IZCMP-NEXT: .cfi_offset a5, -100
+; RV32IZCMP-NEXT: .cfi_offset a6, -104
+; RV32IZCMP-NEXT: .cfi_offset a7, -108
+; RV32IZCMP-NEXT: .cfi_offset t3, -112
+; RV32IZCMP-NEXT: .cfi_offset t4, -116
+; RV32IZCMP-NEXT: .cfi_offset t5, -120
+; RV32IZCMP-NEXT: .cfi_offset t6, -124
; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
@@ -2062,14 +2424,59 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: .cfi_restore t0
+; RV32IZCMP-NEXT: .cfi_restore t1
+; RV32IZCMP-NEXT: .cfi_restore t2
+; RV32IZCMP-NEXT: .cfi_restore a0
+; RV32IZCMP-NEXT: .cfi_restore a1
+; RV32IZCMP-NEXT: .cfi_restore a2
+; RV32IZCMP-NEXT: .cfi_restore a3
+; RV32IZCMP-NEXT: .cfi_restore a4
+; RV32IZCMP-NEXT: .cfi_restore a5
+; RV32IZCMP-NEXT: .cfi_restore a6
+; RV32IZCMP-NEXT: .cfi_restore a7
+; RV32IZCMP-NEXT: .cfi_restore t3
+; RV32IZCMP-NEXT: .cfi_restore t4
+; RV32IZCMP-NEXT: .cfi_restore t5
+; RV32IZCMP-NEXT: .cfi_restore t6
; RV32IZCMP-NEXT: addi sp, sp, 48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 112
+; RV32IZCMP-NEXT: .cfi_restore ra
+; RV32IZCMP-NEXT: .cfi_restore s0
+; RV32IZCMP-NEXT: .cfi_restore s1
+; RV32IZCMP-NEXT: .cfi_restore s2
+; RV32IZCMP-NEXT: .cfi_restore s3
+; RV32IZCMP-NEXT: .cfi_restore s4
+; RV32IZCMP-NEXT: .cfi_restore s5
+; RV32IZCMP-NEXT: .cfi_restore s6
+; RV32IZCMP-NEXT: .cfi_restore s7
+; RV32IZCMP-NEXT: .cfi_restore s8
+; RV32IZCMP-NEXT: .cfi_restore s9
+; RV32IZCMP-NEXT: .cfi_restore s10
+; RV32IZCMP-NEXT: .cfi_restore s11
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: mret
;
; RV64IZCMP-LABEL: callee_with_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
+; RV64IZCMP-NEXT: .cfi_offset ra, -104
+; RV64IZCMP-NEXT: .cfi_offset s0, -96
+; RV64IZCMP-NEXT: .cfi_offset s1, -88
+; RV64IZCMP-NEXT: .cfi_offset s2, -80
+; RV64IZCMP-NEXT: .cfi_offset s3, -72
+; RV64IZCMP-NEXT: .cfi_offset s4, -64
+; RV64IZCMP-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-NEXT: .cfi_offset s6, -48
+; RV64IZCMP-NEXT: .cfi_offset s7, -40
+; RV64IZCMP-NEXT: .cfi_offset s8, -32
+; RV64IZCMP-NEXT: .cfi_offset s9, -24
+; RV64IZCMP-NEXT: .cfi_offset s10, -16
+; RV64IZCMP-NEXT: .cfi_offset s11, -8
; RV64IZCMP-NEXT: addi sp, sp, -128
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 288
; RV64IZCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
@@ -2085,6 +2492,21 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: .cfi_offset t0, -120
+; RV64IZCMP-NEXT: .cfi_offset t1, -128
+; RV64IZCMP-NEXT: .cfi_offset t2, -136
+; RV64IZCMP-NEXT: .cfi_offset a0, -144
+; RV64IZCMP-NEXT: .cfi_offset a1, -152
+; RV64IZCMP-NEXT: .cfi_offset a2, -160
+; RV64IZCMP-NEXT: .cfi_offset a3, -168
+; RV64IZCMP-NEXT: .cfi_offset a4, -176
+; RV64IZCMP-NEXT: .cfi_offset a5, -184
+; RV64IZCMP-NEXT: .cfi_offset a6, -192
+; RV64IZCMP-NEXT: .cfi_offset a7, -200
+; RV64IZCMP-NEXT: .cfi_offset t3, -208
+; RV64IZCMP-NEXT: .cfi_offset t4, -216
+; RV64IZCMP-NEXT: .cfi_offset t5, -224
+; RV64IZCMP-NEXT: .cfi_offset t6, -232
; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
@@ -2178,14 +2600,59 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: .cfi_restore t0
+; RV64IZCMP-NEXT: .cfi_restore t1
+; RV64IZCMP-NEXT: .cfi_restore t2
+; RV64IZCMP-NEXT: .cfi_restore a0
+; RV64IZCMP-NEXT: .cfi_restore a1
+; RV64IZCMP-NEXT: .cfi_restore a2
+; RV64IZCMP-NEXT: .cfi_restore a3
+; RV64IZCMP-NEXT: .cfi_restore a4
+; RV64IZCMP-NEXT: .cfi_restore a5
+; RV64IZCMP-NEXT: .cfi_restore a6
+; RV64IZCMP-NEXT: .cfi_restore a7
+; RV64IZCMP-NEXT: .cfi_restore t3
+; RV64IZCMP-NEXT: .cfi_restore t4
+; RV64IZCMP-NEXT: .cfi_restore t5
+; RV64IZCMP-NEXT: .cfi_restore t6
; RV64IZCMP-NEXT: addi sp, sp, 128
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-NEXT: .cfi_restore ra
+; RV64IZCMP-NEXT: .cfi_restore s0
+; RV64IZCMP-NEXT: .cfi_restore s1
+; RV64IZCMP-NEXT: .cfi_restore s2
+; RV64IZCMP-NEXT: .cfi_restore s3
+; RV64IZCMP-NEXT: .cfi_restore s4
+; RV64IZCMP-NEXT: .cfi_restore s5
+; RV64IZCMP-NEXT: .cfi_restore s6
+; RV64IZCMP-NEXT: .cfi_restore s7
+; RV64IZCMP-NEXT: .cfi_restore s8
+; RV64IZCMP-NEXT: .cfi_restore s9
+; RV64IZCMP-NEXT: .cfi_restore s10
+; RV64IZCMP-NEXT: .cfi_restore s11
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: mret
;
; RV32IZCMP-SR-LABEL: callee_with_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -112
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 112
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -52
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -48
+; RV32IZCMP-SR-NEXT: .cfi_offset s1, -44
+; RV32IZCMP-SR-NEXT: .cfi_offset s2, -40
+; RV32IZCMP-SR-NEXT: .cfi_offset s3, -36
+; RV32IZCMP-SR-NEXT: .cfi_offset s4, -32
+; RV32IZCMP-SR-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-SR-NEXT: .cfi_offset s6, -24
+; RV32IZCMP-SR-NEXT: .cfi_offset s7, -20
+; RV32IZCMP-SR-NEXT: .cfi_offset s8, -16
+; RV32IZCMP-SR-NEXT: .cfi_offset s9, -12
+; RV32IZCMP-SR-NEXT: .cfi_offset s10, -8
+; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4
; RV32IZCMP-SR-NEXT: addi sp, sp, -48
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
; RV32IZCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
@@ -2201,6 +2668,21 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: .cfi_offset t0, -68
+; RV32IZCMP-SR-NEXT: .cfi_offset t1, -72
+; RV32IZCMP-SR-NEXT: .cfi_offset t2, -76
+; RV32IZCMP-SR-NEXT: .cfi_offset a0, -80
+; RV32IZCMP-SR-NEXT: .cfi_offset a1, -84
+; RV32IZCMP-SR-NEXT: .cfi_offset a2, -88
+; RV32IZCMP-SR-NEXT: .cfi_offset a3, -92
+; RV32IZCMP-SR-NEXT: .cfi_offset a4, -96
+; RV32IZCMP-SR-NEXT: .cfi_offset a5, -100
+; RV32IZCMP-SR-NEXT: .cfi_offset a6, -104
+; RV32IZCMP-SR-NEXT: .cfi_offset a7, -108
+; RV32IZCMP-SR-NEXT: .cfi_offset t3, -112
+; RV32IZCMP-SR-NEXT: .cfi_offset t4, -116
+; RV32IZCMP-SR-NEXT: .cfi_offset t5, -120
+; RV32IZCMP-SR-NEXT: .cfi_offset t6, -124
; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV32IZCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
@@ -2294,14 +2776,59 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: .cfi_restore t0
+; RV32IZCMP-SR-NEXT: .cfi_restore t1
+; RV32IZCMP-SR-NEXT: .cfi_restore t2
+; RV32IZCMP-SR-NEXT: .cfi_restore a0
+; RV32IZCMP-SR-NEXT: .cfi_restore a1
+; RV32IZCMP-SR-NEXT: .cfi_restore a2
+; RV32IZCMP-SR-NEXT: .cfi_restore a3
+; RV32IZCMP-SR-NEXT: .cfi_restore a4
+; RV32IZCMP-SR-NEXT: .cfi_restore a5
+; RV32IZCMP-SR-NEXT: .cfi_restore a6
+; RV32IZCMP-SR-NEXT: .cfi_restore a7
+; RV32IZCMP-SR-NEXT: .cfi_restore t3
+; RV32IZCMP-SR-NEXT: .cfi_restore t4
+; RV32IZCMP-SR-NEXT: .cfi_restore t5
+; RV32IZCMP-SR-NEXT: .cfi_restore t6
; RV32IZCMP-SR-NEXT: addi sp, sp, 48
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 112
; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 112
+; RV32IZCMP-SR-NEXT: .cfi_restore ra
+; RV32IZCMP-SR-NEXT: .cfi_restore s0
+; RV32IZCMP-SR-NEXT: .cfi_restore s1
+; RV32IZCMP-SR-NEXT: .cfi_restore s2
+; RV32IZCMP-SR-NEXT: .cfi_restore s3
+; RV32IZCMP-SR-NEXT: .cfi_restore s4
+; RV32IZCMP-SR-NEXT: .cfi_restore s5
+; RV32IZCMP-SR-NEXT: .cfi_restore s6
+; RV32IZCMP-SR-NEXT: .cfi_restore s7
+; RV32IZCMP-SR-NEXT: .cfi_restore s8
+; RV32IZCMP-SR-NEXT: .cfi_restore s9
+; RV32IZCMP-SR-NEXT: .cfi_restore s10
+; RV32IZCMP-SR-NEXT: .cfi_restore s11
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-SR-NEXT: mret
;
; RV64IZCMP-SR-LABEL: callee_with_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -104
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -96
+; RV64IZCMP-SR-NEXT: .cfi_offset s1, -88
+; RV64IZCMP-SR-NEXT: .cfi_offset s2, -80
+; RV64IZCMP-SR-NEXT: .cfi_offset s3, -72
+; RV64IZCMP-SR-NEXT: .cfi_offset s4, -64
+; RV64IZCMP-SR-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-SR-NEXT: .cfi_offset s6, -48
+; RV64IZCMP-SR-NEXT: .cfi_offset s7, -40
+; RV64IZCMP-SR-NEXT: .cfi_offset s8, -32
+; RV64IZCMP-SR-NEXT: .cfi_offset s9, -24
+; RV64IZCMP-SR-NEXT: .cfi_offset s10, -16
+; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8
; RV64IZCMP-SR-NEXT: addi sp, sp, -128
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 288
; RV64IZCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
@@ -2317,6 +2844,21 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: .cfi_offset t0, -120
+; RV64IZCMP-SR-NEXT: .cfi_offset t1, -128
+; RV64IZCMP-SR-NEXT: .cfi_offset t2, -136
+; RV64IZCMP-SR-NEXT: .cfi_offset a0, -144
+; RV64IZCMP-SR-NEXT: .cfi_offset a1, -152
+; RV64IZCMP-SR-NEXT: .cfi_offset a2, -160
+; RV64IZCMP-SR-NEXT: .cfi_offset a3, -168
+; RV64IZCMP-SR-NEXT: .cfi_offset a4, -176
+; RV64IZCMP-SR-NEXT: .cfi_offset a5, -184
+; RV64IZCMP-SR-NEXT: .cfi_offset a6, -192
+; RV64IZCMP-SR-NEXT: .cfi_offset a7, -200
+; RV64IZCMP-SR-NEXT: .cfi_offset t3, -208
+; RV64IZCMP-SR-NEXT: .cfi_offset t4, -216
+; RV64IZCMP-SR-NEXT: .cfi_offset t5, -224
+; RV64IZCMP-SR-NEXT: .cfi_offset t6, -232
; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
@@ -2410,13 +2952,44 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: .cfi_restore t0
+; RV64IZCMP-SR-NEXT: .cfi_restore t1
+; RV64IZCMP-SR-NEXT: .cfi_restore t2
+; RV64IZCMP-SR-NEXT: .cfi_restore a0
+; RV64IZCMP-SR-NEXT: .cfi_restore a1
+; RV64IZCMP-SR-NEXT: .cfi_restore a2
+; RV64IZCMP-SR-NEXT: .cfi_restore a3
+; RV64IZCMP-SR-NEXT: .cfi_restore a4
+; RV64IZCMP-SR-NEXT: .cfi_restore a5
+; RV64IZCMP-SR-NEXT: .cfi_restore a6
+; RV64IZCMP-SR-NEXT: .cfi_restore a7
+; RV64IZCMP-SR-NEXT: .cfi_restore t3
+; RV64IZCMP-SR-NEXT: .cfi_restore t4
+; RV64IZCMP-SR-NEXT: .cfi_restore t5
+; RV64IZCMP-SR-NEXT: .cfi_restore t6
; RV64IZCMP-SR-NEXT: addi sp, sp, 128
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 160
+; RV64IZCMP-SR-NEXT: .cfi_restore ra
+; RV64IZCMP-SR-NEXT: .cfi_restore s0
+; RV64IZCMP-SR-NEXT: .cfi_restore s1
+; RV64IZCMP-SR-NEXT: .cfi_restore s2
+; RV64IZCMP-SR-NEXT: .cfi_restore s3
+; RV64IZCMP-SR-NEXT: .cfi_restore s4
+; RV64IZCMP-SR-NEXT: .cfi_restore s5
+; RV64IZCMP-SR-NEXT: .cfi_restore s6
+; RV64IZCMP-SR-NEXT: .cfi_restore s7
+; RV64IZCMP-SR-NEXT: .cfi_restore s8
+; RV64IZCMP-SR-NEXT: .cfi_restore s9
+; RV64IZCMP-SR-NEXT: .cfi_restore s10
+; RV64IZCMP-SR-NEXT: .cfi_restore s11
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-SR-NEXT: mret
;
; RV32I-LABEL: callee_with_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -144
+; RV32I-NEXT: .cfi_def_cfa_offset 144
; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t0, 136(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t1, 132(sp) # 4-byte Folded Spill
@@ -2445,6 +3018,34 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset t0, -8
+; RV32I-NEXT: .cfi_offset t1, -12
+; RV32I-NEXT: .cfi_offset t2, -16
+; RV32I-NEXT: .cfi_offset s0, -20
+; RV32I-NEXT: .cfi_offset s1, -24
+; RV32I-NEXT: .cfi_offset a0, -28
+; RV32I-NEXT: .cfi_offset a1, -32
+; RV32I-NEXT: .cfi_offset a2, -36
+; RV32I-NEXT: .cfi_offset a3, -40
+; RV32I-NEXT: .cfi_offset a4, -44
+; RV32I-NEXT: .cfi_offset a5, -48
+; RV32I-NEXT: .cfi_offset a6, -52
+; RV32I-NEXT: .cfi_offset a7, -56
+; RV32I-NEXT: .cfi_offset s2, -60
+; RV32I-NEXT: .cfi_offset s3, -64
+; RV32I-NEXT: .cfi_offset s4, -68
+; RV32I-NEXT: .cfi_offset s5, -72
+; RV32I-NEXT: .cfi_offset s6, -76
+; RV32I-NEXT: .cfi_offset s7, -80
+; RV32I-NEXT: .cfi_offset s8, -84
+; RV32I-NEXT: .cfi_offset s9, -88
+; RV32I-NEXT: .cfi_offset s10, -92
+; RV32I-NEXT: .cfi_offset s11, -96
+; RV32I-NEXT: .cfi_offset t3, -100
+; RV32I-NEXT: .cfi_offset t4, -104
+; RV32I-NEXT: .cfi_offset t5, -108
+; RV32I-NEXT: .cfi_offset t6, -112
; RV32I-NEXT: lui a7, %hi(var_test_irq)
; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32I-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
@@ -2551,12 +3152,42 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore t0
+; RV32I-NEXT: .cfi_restore t1
+; RV32I-NEXT: .cfi_restore t2
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
+; RV32I-NEXT: .cfi_restore a0
+; RV32I-NEXT: .cfi_restore a1
+; RV32I-NEXT: .cfi_restore a2
+; RV32I-NEXT: .cfi_restore a3
+; RV32I-NEXT: .cfi_restore a4
+; RV32I-NEXT: .cfi_restore a5
+; RV32I-NEXT: .cfi_restore a6
+; RV32I-NEXT: .cfi_restore a7
+; RV32I-NEXT: .cfi_restore s2
+; RV32I-NEXT: .cfi_restore s3
+; RV32I-NEXT: .cfi_restore s4
+; RV32I-NEXT: .cfi_restore s5
+; RV32I-NEXT: .cfi_restore s6
+; RV32I-NEXT: .cfi_restore s7
+; RV32I-NEXT: .cfi_restore s8
+; RV32I-NEXT: .cfi_restore s9
+; RV32I-NEXT: .cfi_restore s10
+; RV32I-NEXT: .cfi_restore s11
+; RV32I-NEXT: .cfi_restore t3
+; RV32I-NEXT: .cfi_restore t4
+; RV32I-NEXT: .cfi_restore t5
+; RV32I-NEXT: .cfi_restore t6
; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: mret
;
; RV64I-LABEL: callee_with_irq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -272
+; RV64I-NEXT: .cfi_def_cfa_offset 272
; RV64I-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t0, 256(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t1, 248(sp) # 8-byte Folded Spill
@@ -2585,6 +3216,34 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset t0, -16
+; RV64I-NEXT: .cfi_offset t1, -24
+; RV64I-NEXT: .cfi_offset t2, -32
+; RV64I-NEXT: .cfi_offset s0, -40
+; RV64I-NEXT: .cfi_offset s1, -48
+; RV64I-NEXT: .cfi_offset a0, -56
+; RV64I-NEXT: .cfi_offset a1, -64
+; RV64I-NEXT: .cfi_offset a2, -72
+; RV64I-NEXT: .cfi_offset a3, -80
+; RV64I-NEXT: .cfi_offset a4, -88
+; RV64I-NEXT: .cfi_offset a5, -96
+; RV64I-NEXT: .cfi_offset a6, -104
+; RV64I-NEXT: .cfi_offset a7, -112
+; RV64I-NEXT: .cfi_offset s2, -120
+; RV64I-NEXT: .cfi_offset s3, -128
+; RV64I-NEXT: .cfi_offset s4, -136
+; RV64I-NEXT: .cfi_offset s5, -144
+; RV64I-NEXT: .cfi_offset s6, -152
+; RV64I-NEXT: .cfi_offset s7, -160
+; RV64I-NEXT: .cfi_offset s8, -168
+; RV64I-NEXT: .cfi_offset s9, -176
+; RV64I-NEXT: .cfi_offset s10, -184
+; RV64I-NEXT: .cfi_offset s11, -192
+; RV64I-NEXT: .cfi_offset t3, -200
+; RV64I-NEXT: .cfi_offset t4, -208
+; RV64I-NEXT: .cfi_offset t5, -216
+; RV64I-NEXT: .cfi_offset t6, -224
; RV64I-NEXT: lui a7, %hi(var_test_irq)
; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
@@ -2691,17 +3350,60 @@ define void @callee_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
+; RV64I-NEXT: .cfi_restore t0
+; RV64I-NEXT: .cfi_restore t1
+; RV64I-NEXT: .cfi_restore t2
+; RV64I-NEXT: .cfi_restore s0
+; RV64I-NEXT: .cfi_restore s1
+; RV64I-NEXT: .cfi_restore a0
+; RV64I-NEXT: .cfi_restore a1
+; RV64I-NEXT: .cfi_restore a2
+; RV64I-NEXT: .cfi_restore a3
+; RV64I-NEXT: .cfi_restore a4
+; RV64I-NEXT: .cfi_restore a5
+; RV64I-NEXT: .cfi_restore a6
+; RV64I-NEXT: .cfi_restore a7
+; RV64I-NEXT: .cfi_restore s2
+; RV64I-NEXT: .cfi_restore s3
+; RV64I-NEXT: .cfi_restore s4
+; RV64I-NEXT: .cfi_restore s5
+; RV64I-NEXT: .cfi_restore s6
+; RV64I-NEXT: .cfi_restore s7
+; RV64I-NEXT: .cfi_restore s8
+; RV64I-NEXT: .cfi_restore s9
+; RV64I-NEXT: .cfi_restore s10
+; RV64I-NEXT: .cfi_restore s11
+; RV64I-NEXT: .cfi_restore t3
+; RV64I-NEXT: .cfi_restore t4
+; RV64I-NEXT: .cfi_restore t5
+; RV64I-NEXT: .cfi_restore t6
; RV64I-NEXT: addi sp, sp, 272
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: mret
%val = load [32 x i32], ptr @var_test_irq
store volatile [32 x i32] %val, ptr @var_test_irq
ret void
}
-define void @callee_no_irq() nounwind{
+define void @callee_no_irq() {
; RV32IZCMP-LABEL: callee_no_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 96
+; RV32IZCMP-NEXT: .cfi_offset ra, -52
+; RV32IZCMP-NEXT: .cfi_offset s0, -48
+; RV32IZCMP-NEXT: .cfi_offset s1, -44
+; RV32IZCMP-NEXT: .cfi_offset s2, -40
+; RV32IZCMP-NEXT: .cfi_offset s3, -36
+; RV32IZCMP-NEXT: .cfi_offset s4, -32
+; RV32IZCMP-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-NEXT: .cfi_offset s6, -24
+; RV32IZCMP-NEXT: .cfi_offset s7, -20
+; RV32IZCMP-NEXT: .cfi_offset s8, -16
+; RV32IZCMP-NEXT: .cfi_offset s9, -12
+; RV32IZCMP-NEXT: .cfi_offset s10, -8
+; RV32IZCMP-NEXT: .cfi_offset s11, -4
; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
@@ -2785,6 +3487,20 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-LABEL: callee_no_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
+; RV64IZCMP-NEXT: .cfi_offset ra, -104
+; RV64IZCMP-NEXT: .cfi_offset s0, -96
+; RV64IZCMP-NEXT: .cfi_offset s1, -88
+; RV64IZCMP-NEXT: .cfi_offset s2, -80
+; RV64IZCMP-NEXT: .cfi_offset s3, -72
+; RV64IZCMP-NEXT: .cfi_offset s4, -64
+; RV64IZCMP-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-NEXT: .cfi_offset s6, -48
+; RV64IZCMP-NEXT: .cfi_offset s7, -40
+; RV64IZCMP-NEXT: .cfi_offset s8, -32
+; RV64IZCMP-NEXT: .cfi_offset s9, -24
+; RV64IZCMP-NEXT: .cfi_offset s10, -16
+; RV64IZCMP-NEXT: .cfi_offset s11, -8
; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
@@ -2868,6 +3584,20 @@ define void @callee_no_irq() nounwind{
; RV32IZCMP-SR-LABEL: callee_no_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 96
+; RV32IZCMP-SR-NEXT: .cfi_offset ra, -52
+; RV32IZCMP-SR-NEXT: .cfi_offset s0, -48
+; RV32IZCMP-SR-NEXT: .cfi_offset s1, -44
+; RV32IZCMP-SR-NEXT: .cfi_offset s2, -40
+; RV32IZCMP-SR-NEXT: .cfi_offset s3, -36
+; RV32IZCMP-SR-NEXT: .cfi_offset s4, -32
+; RV32IZCMP-SR-NEXT: .cfi_offset s5, -28
+; RV32IZCMP-SR-NEXT: .cfi_offset s6, -24
+; RV32IZCMP-SR-NEXT: .cfi_offset s7, -20
+; RV32IZCMP-SR-NEXT: .cfi_offset s8, -16
+; RV32IZCMP-SR-NEXT: .cfi_offset s9, -12
+; RV32IZCMP-SR-NEXT: .cfi_offset s10, -8
+; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4
; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
@@ -2951,6 +3681,20 @@ define void @callee_no_irq() nounwind{
; RV64IZCMP-SR-LABEL: callee_no_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
+; RV64IZCMP-SR-NEXT: .cfi_offset ra, -104
+; RV64IZCMP-SR-NEXT: .cfi_offset s0, -96
+; RV64IZCMP-SR-NEXT: .cfi_offset s1, -88
+; RV64IZCMP-SR-NEXT: .cfi_offset s2, -80
+; RV64IZCMP-SR-NEXT: .cfi_offset s3, -72
+; RV64IZCMP-SR-NEXT: .cfi_offset s4, -64
+; RV64IZCMP-SR-NEXT: .cfi_offset s5, -56
+; RV64IZCMP-SR-NEXT: .cfi_offset s6, -48
+; RV64IZCMP-SR-NEXT: .cfi_offset s7, -40
+; RV64IZCMP-SR-NEXT: .cfi_offset s8, -32
+; RV64IZCMP-SR-NEXT: .cfi_offset s9, -24
+; RV64IZCMP-SR-NEXT: .cfi_offset s10, -16
+; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8
; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
@@ -3034,6 +3778,7 @@ define void @callee_no_irq() nounwind{
; RV32I-LABEL: callee_no_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -80
+; RV32I-NEXT: .cfi_def_cfa_offset 80
; RV32I-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
@@ -3047,6 +3792,19 @@ define void @callee_no_irq() nounwind{
; RV32I-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: .cfi_offset s4, -24
+; RV32I-NEXT: .cfi_offset s5, -28
+; RV32I-NEXT: .cfi_offset s6, -32
+; RV32I-NEXT: .cfi_offset s7, -36
+; RV32I-NEXT: .cfi_offset s8, -40
+; RV32I-NEXT: .cfi_offset s9, -44
+; RV32I-NEXT: .cfi_offset s10, -48
+; RV32I-NEXT: .cfi_offset s11, -52
; RV32I-NEXT: lui a7, %hi(var_test_irq)
; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
@@ -3138,12 +3896,27 @@ define void @callee_no_irq() nounwind{
; RV32I-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
+; RV32I-NEXT: .cfi_restore s2
+; RV32I-NEXT: .cfi_restore s3
+; RV32I-NEXT: .cfi_restore s4
+; RV32I-NEXT: .cfi_restore s5
+; RV32I-NEXT: .cfi_restore s6
+; RV32I-NEXT: .cfi_restore s7
+; RV32I-NEXT: .cfi_restore s8
+; RV32I-NEXT: .cfi_restore s9
+; RV32I-NEXT: .cfi_restore s10
+; RV32I-NEXT: .cfi_restore s11
; RV32I-NEXT: addi sp, sp, 80
+; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: callee_no_irq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -160
+; RV64I-NEXT: .cfi_def_cfa_offset 160
; RV64I-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
@@ -3157,6 +3930,19 @@ define void @callee_no_irq() nounwind{
; RV64I-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: .cfi_offset s1, -24
+; RV64I-NEXT: .cfi_offset s2, -32
+; RV64I-NEXT: .cfi_offset s3, -40
+; RV64I-NEXT: .cfi_offset s4, -48
+; RV64I-NEXT: .cfi_offset s5, -56
+; RV64I-NEXT: .cfi_offset s6, -64
+; RV64I-NEXT: .cfi_offset s7, -72
+; RV64I-NEXT: .cfi_offset s8, -80
+; RV64I-NEXT: .cfi_offset s9, -88
+; RV64I-NEXT: .cfi_offset s10, -96
+; RV64I-NEXT: .cfi_offset s11, -104
; RV64I-NEXT: lui a7, %hi(var_test_irq)
; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
@@ -3248,7 +4034,21 @@ define void @callee_no_irq() nounwind{
; RV64I-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
+; RV64I-NEXT: .cfi_restore s0
+; RV64I-NEXT: .cfi_restore s1
+; RV64I-NEXT: .cfi_restore s2
+; RV64I-NEXT: .cfi_restore s3
+; RV64I-NEXT: .cfi_restore s4
+; RV64I-NEXT: .cfi_restore s5
+; RV64I-NEXT: .cfi_restore s6
+; RV64I-NEXT: .cfi_restore s7
+; RV64I-NEXT: .cfi_restore s8
+; RV64I-NEXT: .cfi_restore s9
+; RV64I-NEXT: .cfi_restore s10
+; RV64I-NEXT: .cfi_restore s11
; RV64I-NEXT: addi sp, sp, 160
+; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
%val = load [32 x i32], ptr @var_test_irq
store volatile [32 x i32] %val, ptr @var_test_irq
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index f636ab9ebd0ce7..bf2fdafc380daf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -8,14 +8,17 @@
@.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1
-define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, i32 %gvl) nounwind
+define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, i32 %gvl)
; SPILL-O0-LABEL: foo:
; SPILL-O0: # %bb.0:
; SPILL-O0-NEXT: addi sp, sp, -32
+; SPILL-O0-NEXT: .cfi_def_cfa_offset 32
; SPILL-O0-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; SPILL-O0-NEXT: .cfi_offset ra, -4
; SPILL-O0-NEXT: csrr a1, vlenb
; SPILL-O0-NEXT: slli a1, a1, 1
; SPILL-O0-NEXT: sub sp, sp, a1
+; SPILL-O0-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
; SPILL-O0-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; SPILL-O0-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; SPILL-O0-NEXT: vmv1r.v v10, v9
@@ -46,18 +49,25 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 1
; SPILL-O0-NEXT: add sp, sp, a0
+; SPILL-O0-NEXT: .cfi_def_cfa sp, 32
; SPILL-O0-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; SPILL-O0-NEXT: .cfi_restore ra
; SPILL-O0-NEXT: addi sp, sp, 32
+; SPILL-O0-NEXT: .cfi_def_cfa_offset 0
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: foo:
; SPILL-O2: # %bb.0:
; SPILL-O2-NEXT: addi sp, sp, -32
+; SPILL-O2-NEXT: .cfi_def_cfa_offset 32
; SPILL-O2-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; SPILL-O2-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; SPILL-O2-NEXT: .cfi_offset ra, -4
+; SPILL-O2-NEXT: .cfi_offset s0, -8
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: sub sp, sp, a1
+; SPILL-O2-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
; SPILL-O2-NEXT: mv s0, a0
; SPILL-O2-NEXT: addi a1, sp, 16
; SPILL-O2-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
@@ -81,17 +91,25 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: .cfi_def_cfa sp, 32
; SPILL-O2-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; SPILL-O2-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; SPILL-O2-NEXT: .cfi_restore ra
+; SPILL-O2-NEXT: .cfi_restore s0
; SPILL-O2-NEXT: addi sp, sp, 32
+; SPILL-O2-NEXT: .cfi_def_cfa_offset 0
; SPILL-O2-NEXT: ret
;
; SPILL-O2-ZCMP-LABEL: foo:
; SPILL-O2-ZCMP: # %bb.0:
; SPILL-O2-ZCMP-NEXT: cm.push {ra, s0}, -32
+; SPILL-O2-ZCMP-NEXT: .cfi_def_cfa_offset 32
+; SPILL-O2-ZCMP-NEXT: .cfi_offset ra, -8
+; SPILL-O2-ZCMP-NEXT: .cfi_offset s0, -4
; SPILL-O2-ZCMP-NEXT: csrr a1, vlenb
; SPILL-O2-ZCMP-NEXT: slli a1, a1, 1
; SPILL-O2-ZCMP-NEXT: sub sp, sp, a1
+; SPILL-O2-ZCMP-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
; SPILL-O2-ZCMP-NEXT: mv s0, a0
; SPILL-O2-ZCMP-NEXT: addi a1, sp, 16
; SPILL-O2-ZCMP-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
@@ -115,6 +133,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-ZCMP-NEXT: csrr a0, vlenb
; SPILL-O2-ZCMP-NEXT: slli a0, a0, 1
; SPILL-O2-ZCMP-NEXT: add sp, sp, a0
+; SPILL-O2-ZCMP-NEXT: .cfi_def_cfa sp, 32
; SPILL-O2-ZCMP-NEXT: cm.popret {ra, s0}, 32
{
%x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 7, i32 %gvl)