[llvm-branch-commits] [llvm] e28b6a6 - [RISCV][NFC] Regenerate RISCV CodeGen tests

Luís Marques via llvm-branch-commits llvm-branch-commits@lists.llvm.org
Wed Dec 9 11:48:31 PST 2020


Author: Michael Munday
Date: 2020-12-09T19:42:49Z
New Revision: e28b6a60bc02d61e50e0da545f337ea482b10240

URL: https://github.com/llvm/llvm-project/commit/e28b6a60bc02d61e50e0da545f337ea482b10240
DIFF: https://github.com/llvm/llvm-project/commit/e28b6a60bc02d61e50e0da545f337ea482b10240.diff

LOG: [RISCV][NFC] Regenerate RISCV CodeGen tests

Regenerated using:

./llvm/utils/update_llc_test_checks.py -u llvm/test/CodeGen/RISCV/*.ll

This has added comments to spill-related instructions and added @plt to
some symbols.
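
For reference, a single test can be regenerated and re-checked in
isolation. A minimal sketch, assuming an in-tree build directory named
build/ (the directory name is an assumption; --llc-binary and llvm-lit
are the standard tools):

# Regenerate one test's CHECK lines against a freshly built llc.
./llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
    llvm/test/CodeGen/RISCV/alloca.ll

# Re-run the test to confirm the regenerated CHECK lines pass.
build/bin/llvm-lit -v llvm/test/CodeGen/RISCV/alloca.ll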

Differential Revision: https://reviews.llvm.org/D92841

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/alloca.ll
    llvm/test/CodeGen/RISCV/analyze-branch.ll
    llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
    llvm/test/CodeGen/RISCV/atomic-load-store.ll
    llvm/test/CodeGen/RISCV/atomic-rmw.ll
    llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
    llvm/test/CodeGen/RISCV/byval.ll
    llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
    llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
    llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
    llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
    llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
    llvm/test/CodeGen/RISCV/calls.ll
    llvm/test/CodeGen/RISCV/copysign-casts.ll
    llvm/test/CodeGen/RISCV/div.ll
    llvm/test/CodeGen/RISCV/double-br-fcmp.ll
    llvm/test/CodeGen/RISCV/double-calling-conv.ll
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/double-frem.ll
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/double-mem.ll
    llvm/test/CodeGen/RISCV/double-previous-failure.ll
    llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
    llvm/test/CodeGen/RISCV/exception-pointer-register.ll
    llvm/test/CodeGen/RISCV/fastcc-float.ll
    llvm/test/CodeGen/RISCV/fastcc-int.ll
    llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
    llvm/test/CodeGen/RISCV/float-br-fcmp.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/float-frem.ll
    llvm/test/CodeGen/RISCV/float-intrinsics.ll
    llvm/test/CodeGen/RISCV/float-mem.ll
    llvm/test/CodeGen/RISCV/fp128.ll
    llvm/test/CodeGen/RISCV/fp16-promote.ll
    llvm/test/CodeGen/RISCV/frame-info.ll
    llvm/test/CodeGen/RISCV/frame.ll
    llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
    llvm/test/CodeGen/RISCV/ghccc-rv32.ll
    llvm/test/CodeGen/RISCV/ghccc-rv64.ll
    llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
    llvm/test/CodeGen/RISCV/half-br-fcmp.ll
    llvm/test/CodeGen/RISCV/half-convert.ll
    llvm/test/CodeGen/RISCV/half-mem.ll
    llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
    llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
    llvm/test/CodeGen/RISCV/inline-asm-d-abi-names.ll
    llvm/test/CodeGen/RISCV/inline-asm-f-abi-names.ll
    llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
    llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
    llvm/test/CodeGen/RISCV/interrupt-attr.ll
    llvm/test/CodeGen/RISCV/large-stack.ll
    llvm/test/CodeGen/RISCV/mul.ll
    llvm/test/CodeGen/RISCV/rem.ll
    llvm/test/CodeGen/RISCV/remat.ll
    llvm/test/CodeGen/RISCV/rv32Zbb.ll
    llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
    llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
    llvm/test/CodeGen/RISCV/rv64-large-stack.ll
    llvm/test/CodeGen/RISCV/rv64Zbb.ll
    llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
    llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
    llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
    llvm/test/CodeGen/RISCV/select-and.ll
    llvm/test/CodeGen/RISCV/select-or.ll
    llvm/test/CodeGen/RISCV/shadowcallstack.ll
    llvm/test/CodeGen/RISCV/shifts.ll
    llvm/test/CodeGen/RISCV/shrinkwrap.ll
    llvm/test/CodeGen/RISCV/split-sp-adjust.ll
    llvm/test/CodeGen/RISCV/srem-lkk.ll
    llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
    llvm/test/CodeGen/RISCV/stack-realignment.ll
    llvm/test/CodeGen/RISCV/stack-store-check.ll
    llvm/test/CodeGen/RISCV/tls-models.ll
    llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
    llvm/test/CodeGen/RISCV/urem-lkk.ll
    llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/vararg.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll
index 6cd9bf46376d..1c211a09c0ff 100644
--- a/llvm/test/CodeGen/RISCV/alloca.ll
+++ b/llvm/test/CodeGen/RISCV/alloca.ll
@@ -11,17 +11,17 @@ define void @simple_alloca(i32 %n) nounwind {
 ; RV32I-LABEL: simple_alloca:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 16
 ; RV32I-NEXT:    addi a0, a0, 15
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call notdead
+; RV32I-NEXT:    call notdead@plt
 ; RV32I-NEXT:    addi sp, s0, -16
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = alloca i8, i32 %n
@@ -36,21 +36,21 @@ define void @scoped_alloca(i32 %n) nounwind {
 ; RV32I-LABEL: scoped_alloca:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 16
 ; RV32I-NEXT:    mv s1, sp
 ; RV32I-NEXT:    addi a0, a0, 15
 ; RV32I-NEXT:    andi a0, a0, -16
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
-; RV32I-NEXT:    call notdead
+; RV32I-NEXT:    call notdead@plt
 ; RV32I-NEXT:    mv sp, s1
 ; RV32I-NEXT:    addi sp, s0, -16
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %sp = call i8* @llvm.stacksave()
@@ -68,8 +68,8 @@ define void @alloca_callframe(i32 %n) nounwind {
 ; RV32I-LABEL: alloca_callframe:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 16
 ; RV32I-NEXT:    addi a0, a0, 15
 ; RV32I-NEXT:    andi a0, a0, -16
@@ -91,11 +91,11 @@ define void @alloca_callframe(i32 %n) nounwind {
 ; RV32I-NEXT:    addi a6, zero, 7
 ; RV32I-NEXT:    addi a7, zero, 8
 ; RV32I-NEXT:    sw t0, 0(sp)
-; RV32I-NEXT:    call func
+; RV32I-NEXT:    call func@plt
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    addi sp, s0, -16
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = alloca i8, i32 %n

diff --git a/llvm/test/CodeGen/RISCV/analyze-branch.ll b/llvm/test/CodeGen/RISCV/analyze-branch.ll
index f5b07836e87d..a932fcb47e24 100644
--- a/llvm/test/CodeGen/RISCV/analyze-branch.ll
+++ b/llvm/test/CodeGen/RISCV/analyze-branch.ll
@@ -16,17 +16,17 @@ define void @test_bcc_fallthrough_taken(i32 %in) nounwind {
 ; RV32I-LABEL: test_bcc_fallthrough_taken:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 42
 ; RV32I-NEXT:    bne a0, a1, .LBB0_3
 ; RV32I-NEXT:  # %bb.1: # %true
-; RV32I-NEXT:    call test_true
+; RV32I-NEXT:    call test_true@plt
 ; RV32I-NEXT:  .LBB0_2: # %true
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_3: # %false
-; RV32I-NEXT:    call test_false
+; RV32I-NEXT:    call test_false@plt
 ; RV32I-NEXT:    j .LBB0_2
   %tst = icmp eq i32 %in, 42
   br i1 %tst, label %true, label %false, !prof !0
@@ -48,17 +48,17 @@ define void @test_bcc_fallthrough_nottaken(i32 %in) nounwind {
 ; RV32I-LABEL: test_bcc_fallthrough_nottaken:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 42
 ; RV32I-NEXT:    beq a0, a1, .LBB1_3
 ; RV32I-NEXT:  # %bb.1: # %false
-; RV32I-NEXT:    call test_false
+; RV32I-NEXT:    call test_false@plt
 ; RV32I-NEXT:  .LBB1_2: # %true
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB1_3: # %true
-; RV32I-NEXT:    call test_true
+; RV32I-NEXT:    call test_true@plt
 ; RV32I-NEXT:    j .LBB1_2
   %tst = icmp eq i32 %in, 42
   br i1 %tst, label %true, label %false, !prof !1

diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index f2691ba1a771..6656e3d964d6 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -12,13 +12,13 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -49,13 +49,13 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -90,13 +90,13 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -127,13 +127,13 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -168,13 +168,13 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_acquire_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -205,13 +205,13 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_acquire_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -246,13 +246,13 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_release_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -283,13 +283,13 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_release_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -324,13 +324,13 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_release_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -361,13 +361,13 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_release_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -402,13 +402,13 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_acq_rel_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -439,13 +439,13 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_acq_rel_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -480,13 +480,13 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_acq_rel_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -517,13 +517,13 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_acq_rel_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -558,13 +558,13 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_seq_cst_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -595,13 +595,13 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_seq_cst_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -636,13 +636,13 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_seq_cst_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -673,13 +673,13 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_seq_cst_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -714,13 +714,13 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-LABEL: cmpxchg_i8_seq_cst_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    call __atomic_compare_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -751,13 +751,13 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-LABEL: cmpxchg_i8_seq_cst_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -792,13 +792,13 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) noun
 ; RV32I-LABEL: cmpxchg_i16_monotonic_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -830,13 +830,13 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) noun
 ; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -872,13 +872,13 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i16_acquire_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -910,13 +910,13 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i16_acquire_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -952,13 +952,13 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i16_acquire_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -990,13 +990,13 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i16_acquire_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1032,13 +1032,13 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i16_release_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1070,13 +1070,13 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i16_release_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1112,13 +1112,13 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i16_release_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1150,13 +1150,13 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i16_release_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1192,13 +1192,13 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i16_acq_rel_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1230,13 +1230,13 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i16_acq_rel_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1272,13 +1272,13 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i16_acq_rel_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1310,13 +1310,13 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i16_acq_rel_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1352,13 +1352,13 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i16_seq_cst_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1390,13 +1390,13 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i16_seq_cst_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1432,13 +1432,13 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i16_seq_cst_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1470,13 +1470,13 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i16_seq_cst_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1512,13 +1512,13 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i16_seq_cst_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    call __atomic_compare_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1550,13 +1550,13 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i16_seq_cst_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1592,13 +1592,13 @@ define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) noun
 ; RV32I-LABEL: cmpxchg_i32_monotonic_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1616,13 +1616,13 @@ define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) noun
 ; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1645,13 +1645,13 @@ define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i32_acquire_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1669,13 +1669,13 @@ define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i32_acquire_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1698,13 +1698,13 @@ define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i32_acquire_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1722,13 +1722,13 @@ define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i32_acquire_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1751,13 +1751,13 @@ define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i32_release_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1775,13 +1775,13 @@ define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i32_release_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1804,13 +1804,13 @@ define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i32_release_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1828,13 +1828,13 @@ define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i32_release_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1857,13 +1857,13 @@ define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i32_acq_rel_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1881,13 +1881,13 @@ define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i32_acq_rel_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1910,13 +1910,13 @@ define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i32_acq_rel_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1934,13 +1934,13 @@ define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i32_acq_rel_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1963,13 +1963,13 @@ define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i32_seq_cst_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1987,13 +1987,13 @@ define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-LABEL: cmpxchg_i32_seq_cst_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2016,13 +2016,13 @@ define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i32_seq_cst_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -2040,13 +2040,13 @@ define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i32_seq_cst_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2069,13 +2069,13 @@ define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i32_seq_cst_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    call __atomic_compare_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -2093,13 +2093,13 @@ define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-LABEL: cmpxchg_i32_seq_cst_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2122,7 +2122,7 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) noun
 ; RV32I-LABEL: cmpxchg_i64_monotonic_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
@@ -2130,15 +2130,15 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) noun
 ; RV32I-NEXT:    mv a3, a4
 ; RV32I-NEXT:    mv a4, zero
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_monotonic_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
@@ -2146,21 +2146,21 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) noun
 ; RV32IA-NEXT:    mv a3, a4
 ; RV32IA-NEXT:    mv a4, zero
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_monotonic_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2182,7 +2182,7 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i64_acquire_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a5, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2191,15 +2191,15 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_acquire_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a5, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2208,21 +2208,21 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_acquire_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2244,7 +2244,7 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i64_acquire_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a6, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2253,15 +2253,15 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_acquire_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a6, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2270,21 +2270,21 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_acquire_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2306,7 +2306,7 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i64_release_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a5, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2315,15 +2315,15 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_release_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a5, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2332,21 +2332,21 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_release_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2368,7 +2368,7 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i64_release_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a6, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2377,15 +2377,15 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_release_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a6, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2394,21 +2394,21 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_release_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2430,7 +2430,7 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i64_acq_rel_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a5, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2439,15 +2439,15 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_acq_rel_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a5, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2456,21 +2456,21 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_acq_rel_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2492,7 +2492,7 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i64_acq_rel_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a6, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2501,15 +2501,15 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_acq_rel_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a6, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2518,21 +2518,21 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_acq_rel_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2554,7 +2554,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-LABEL: cmpxchg_i64_seq_cst_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a5, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2563,15 +2563,15 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_seq_cst_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a5, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2580,21 +2580,21 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_seq_cst_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2616,7 +2616,7 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i64_seq_cst_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a6, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2625,15 +2625,15 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_seq_cst_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a6, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2642,21 +2642,21 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_seq_cst_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 2
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2678,7 +2678,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-LABEL: cmpxchg_i64_seq_cst_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a6, a4
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
@@ -2687,15 +2687,15 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    addi a5, zero, 5
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
-; RV32I-NEXT:    call __atomic_compare_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i64_seq_cst_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a6, a4
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
@@ -2704,21 +2704,21 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    addi a5, zero, 5
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i64_seq_cst_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
-; RV64I-NEXT:    call __atomic_compare_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
index da6f986eb752..60a65d2f214b 100644
--- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
@@ -12,10 +12,10 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i8_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -27,10 +27,10 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i8_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -46,10 +46,10 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -61,10 +61,10 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -80,10 +80,10 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 2
-; RV32I-NEXT:    call __atomic_load_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -96,10 +96,10 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    call __atomic_load_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -116,10 +116,10 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 5
-; RV32I-NEXT:    call __atomic_load_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -133,10 +133,10 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __atomic_load_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -154,10 +154,10 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i16_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -169,10 +169,10 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i16_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -188,10 +188,10 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -203,10 +203,10 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -222,10 +222,10 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 2
-; RV32I-NEXT:    call __atomic_load_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -238,10 +238,10 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    call __atomic_load_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -258,10 +258,10 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 5
-; RV32I-NEXT:    call __atomic_load_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -275,10 +275,10 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __atomic_load_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -296,10 +296,10 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i32_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -311,10 +311,10 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i32_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -330,10 +330,10 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -345,10 +345,10 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -364,10 +364,10 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 2
-; RV32I-NEXT:    call __atomic_load_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -380,10 +380,10 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    call __atomic_load_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -400,10 +400,10 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 5
-; RV32I-NEXT:    call __atomic_load_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -417,10 +417,10 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
 ; RV64I-LABEL: atomic_load_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __atomic_load_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -438,30 +438,30 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i64_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_load_i64_unordered:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a1, zero
-; RV32IA-NEXT:    call __atomic_load_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_load_i64_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -477,30 +477,30 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    call __atomic_load_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_load_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a1, zero
-; RV32IA-NEXT:    call __atomic_load_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_load_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __atomic_load_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -516,30 +516,30 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 2
-; RV32I-NEXT:    call __atomic_load_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_load_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a1, zero, 2
-; RV32IA-NEXT:    call __atomic_load_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_load_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    call __atomic_load_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -556,30 +556,30 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 5
-; RV32I-NEXT:    call __atomic_load_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_load_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_load_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a1, zero, 5
-; RV32IA-NEXT:    call __atomic_load_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_load_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_load_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __atomic_load_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_load_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -597,10 +597,10 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i8_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_store_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -612,10 +612,10 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i8_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -631,10 +631,10 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_store_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -646,10 +646,10 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -665,10 +665,10 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_store_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -681,10 +681,10 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_store_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -701,10 +701,10 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_store_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -717,10 +717,10 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_store_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -737,10 +737,10 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i16_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_store_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -752,10 +752,10 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i16_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -771,10 +771,10 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_store_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -786,10 +786,10 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -805,10 +805,10 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_store_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -821,10 +821,10 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_store_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -841,10 +841,10 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_store_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -857,10 +857,10 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_store_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -877,10 +877,10 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i32_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_store_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -892,10 +892,10 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i32_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -911,10 +911,10 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_store_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -926,10 +926,10 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -945,10 +945,10 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_store_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -961,10 +961,10 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_store_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -981,10 +981,10 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_store_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -997,10 +997,10 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomic_store_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_store_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1017,30 +1017,30 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i64_unordered:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_store_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_store_i64_unordered:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_store_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_store_i64_unordered:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1056,30 +1056,30 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_store_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_store_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_store_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_store_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_store_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1095,30 +1095,30 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_store_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_store_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_store_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_store_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_store_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1135,30 +1135,30 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomic_store_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_store_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_store_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomic_store_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_store_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_store_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomic_store_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_store_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_store_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index 4b2e19504c40..62bdfce81504 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -12,10 +12,10 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -43,10 +43,10 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -78,10 +78,10 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -109,10 +109,10 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -144,10 +144,10 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -175,10 +175,10 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -210,10 +210,10 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -241,10 +241,10 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -276,10 +276,10 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_exchange_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -307,10 +307,10 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_exchange_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -342,10 +342,10 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_add_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -373,10 +373,10 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_add_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -408,10 +408,10 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_add_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -439,10 +439,10 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_add_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -474,10 +474,10 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_add_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -505,10 +505,10 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_add_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -540,10 +540,10 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_add_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -571,10 +571,10 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_add_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -606,10 +606,10 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_add_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -637,10 +637,10 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_add_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -672,10 +672,10 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_sub_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -703,10 +703,10 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_sub_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -738,10 +738,10 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_sub_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -769,10 +769,10 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_sub_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -804,10 +804,10 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_sub_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -835,10 +835,10 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_sub_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -870,10 +870,10 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -901,10 +901,10 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -936,10 +936,10 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -967,10 +967,10 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1002,10 +1002,10 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_and_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1027,10 +1027,10 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_and_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1056,10 +1056,10 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_and_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1081,10 +1081,10 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_and_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1110,10 +1110,10 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_and_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1135,10 +1135,10 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_and_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1164,10 +1164,10 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_and_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1189,10 +1189,10 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_and_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1218,10 +1218,10 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_and_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1243,10 +1243,10 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_and_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1272,10 +1272,10 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_nand_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1304,10 +1304,10 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_nand_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1340,10 +1340,10 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1372,10 +1372,10 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1408,10 +1408,10 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1440,10 +1440,10 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1476,10 +1476,10 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1508,10 +1508,10 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1544,10 +1544,10 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1576,10 +1576,10 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1612,10 +1612,10 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_or_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1633,10 +1633,10 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_or_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1658,10 +1658,10 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_or_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1679,10 +1679,10 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_or_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1704,10 +1704,10 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_or_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1725,10 +1725,10 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_or_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1750,10 +1750,10 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_or_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1771,10 +1771,10 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_or_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1796,10 +1796,10 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_or_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1817,10 +1817,10 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_or_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1842,10 +1842,10 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_xor_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1863,10 +1863,10 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_xor_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1888,10 +1888,10 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1909,10 +1909,10 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1934,10 +1934,10 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -1955,10 +1955,10 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -1980,10 +1980,10 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -2001,10 +2001,10 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2026,10 +2026,10 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -2047,10 +2047,10 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -2072,10 +2072,10 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -2089,7 +2089,7 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB35_4
 ; RV32I-NEXT:  .LBB35_2: # %atomicrmw.start
@@ -2104,10 +2104,10 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB35_1
 ; RV32I-NEXT:  .LBB35_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -2144,10 +2144,10 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -2161,7 +2161,7 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB35_4
 ; RV64I-NEXT:  .LBB35_2: # %atomicrmw.start
@@ -2176,10 +2176,10 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB35_1
 ; RV64I-NEXT:  .LBB35_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -2220,10 +2220,10 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -2237,7 +2237,7 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB36_4
 ; RV32I-NEXT:  .LBB36_2: # %atomicrmw.start
@@ -2252,10 +2252,10 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB36_1
 ; RV32I-NEXT:  .LBB36_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -2292,10 +2292,10 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -2309,7 +2309,7 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB36_4
 ; RV64I-NEXT:  .LBB36_2: # %atomicrmw.start
@@ -2324,10 +2324,10 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB36_1
 ; RV64I-NEXT:  .LBB36_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -2368,10 +2368,10 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -2385,7 +2385,7 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB37_4
 ; RV32I-NEXT:  .LBB37_2: # %atomicrmw.start
@@ -2400,10 +2400,10 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB37_1
 ; RV32I-NEXT:  .LBB37_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -2440,10 +2440,10 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -2457,7 +2457,7 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB37_4
 ; RV64I-NEXT:  .LBB37_2: # %atomicrmw.start
@@ -2472,10 +2472,10 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB37_1
 ; RV64I-NEXT:  .LBB37_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -2516,10 +2516,10 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -2533,7 +2533,7 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB38_4
 ; RV32I-NEXT:  .LBB38_2: # %atomicrmw.start
@@ -2548,10 +2548,10 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB38_1
 ; RV32I-NEXT:  .LBB38_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -2588,10 +2588,10 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -2605,7 +2605,7 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB38_4
 ; RV64I-NEXT:  .LBB38_2: # %atomicrmw.start
@@ -2620,10 +2620,10 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB38_1
 ; RV64I-NEXT:  .LBB38_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -2664,10 +2664,10 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -2681,7 +2681,7 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB39_4
 ; RV32I-NEXT:  .LBB39_2: # %atomicrmw.start
@@ -2696,10 +2696,10 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB39_1
 ; RV32I-NEXT:  .LBB39_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -2736,10 +2736,10 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -2753,7 +2753,7 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB39_4
 ; RV64I-NEXT:  .LBB39_2: # %atomicrmw.start
@@ -2768,10 +2768,10 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB39_1
 ; RV64I-NEXT:  .LBB39_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -2812,10 +2812,10 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -2829,7 +2829,7 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB40_4
 ; RV32I-NEXT:  .LBB40_2: # %atomicrmw.start
@@ -2844,10 +2844,10 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB40_1
 ; RV32I-NEXT:  .LBB40_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -2884,10 +2884,10 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -2901,7 +2901,7 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB40_4
 ; RV64I-NEXT:  .LBB40_2: # %atomicrmw.start
@@ -2916,10 +2916,10 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB40_1
 ; RV64I-NEXT:  .LBB40_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -2960,10 +2960,10 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -2977,7 +2977,7 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB41_4
 ; RV32I-NEXT:  .LBB41_2: # %atomicrmw.start
@@ -2992,10 +2992,10 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB41_1
 ; RV32I-NEXT:  .LBB41_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -3032,10 +3032,10 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -3049,7 +3049,7 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB41_4
 ; RV64I-NEXT:  .LBB41_2: # %atomicrmw.start
@@ -3064,10 +3064,10 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB41_1
 ; RV64I-NEXT:  .LBB41_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -3108,10 +3108,10 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -3125,7 +3125,7 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB42_4
 ; RV32I-NEXT:  .LBB42_2: # %atomicrmw.start
@@ -3140,10 +3140,10 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB42_1
 ; RV32I-NEXT:  .LBB42_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -3180,10 +3180,10 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -3197,7 +3197,7 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB42_4
 ; RV64I-NEXT:  .LBB42_2: # %atomicrmw.start
@@ -3212,10 +3212,10 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB42_1
 ; RV64I-NEXT:  .LBB42_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -3256,10 +3256,10 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -3273,7 +3273,7 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB43_4
 ; RV32I-NEXT:  .LBB43_2: # %atomicrmw.start
@@ -3288,10 +3288,10 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB43_1
 ; RV32I-NEXT:  .LBB43_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -3328,10 +3328,10 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -3345,7 +3345,7 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB43_4
 ; RV64I-NEXT:  .LBB43_2: # %atomicrmw.start
@@ -3360,10 +3360,10 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB43_1
 ; RV64I-NEXT:  .LBB43_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -3404,10 +3404,10 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -3421,7 +3421,7 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB44_4
 ; RV32I-NEXT:  .LBB44_2: # %atomicrmw.start
@@ -3436,10 +3436,10 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB44_1
 ; RV32I-NEXT:  .LBB44_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -3476,10 +3476,10 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -3493,7 +3493,7 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB44_4
 ; RV64I-NEXT:  .LBB44_2: # %atomicrmw.start
@@ -3508,10 +3508,10 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB44_1
 ; RV64I-NEXT:  .LBB44_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -3552,10 +3552,10 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -3568,7 +3568,7 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB45_4
 ; RV32I-NEXT:  .LBB45_2: # %atomicrmw.start
@@ -3582,10 +3582,10 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB45_1
 ; RV32I-NEXT:  .LBB45_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -3617,10 +3617,10 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -3633,7 +3633,7 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB45_4
 ; RV64I-NEXT:  .LBB45_2: # %atomicrmw.start
@@ -3647,10 +3647,10 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB45_1
 ; RV64I-NEXT:  .LBB45_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -3686,10 +3686,10 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -3702,7 +3702,7 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB46_4
 ; RV32I-NEXT:  .LBB46_2: # %atomicrmw.start
@@ -3716,10 +3716,10 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB46_1
 ; RV32I-NEXT:  .LBB46_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -3751,10 +3751,10 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -3767,7 +3767,7 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB46_4
 ; RV64I-NEXT:  .LBB46_2: # %atomicrmw.start
@@ -3781,10 +3781,10 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB46_1
 ; RV64I-NEXT:  .LBB46_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -3820,10 +3820,10 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -3836,7 +3836,7 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB47_4
 ; RV32I-NEXT:  .LBB47_2: # %atomicrmw.start
@@ -3850,10 +3850,10 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB47_1
 ; RV32I-NEXT:  .LBB47_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -3885,10 +3885,10 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -3901,7 +3901,7 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB47_4
 ; RV64I-NEXT:  .LBB47_2: # %atomicrmw.start
@@ -3915,10 +3915,10 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB47_1
 ; RV64I-NEXT:  .LBB47_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -3954,10 +3954,10 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -3970,7 +3970,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB48_4
 ; RV32I-NEXT:  .LBB48_2: # %atomicrmw.start
@@ -3984,10 +3984,10 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB48_1
 ; RV32I-NEXT:  .LBB48_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -4019,10 +4019,10 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -4035,7 +4035,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB48_4
 ; RV64I-NEXT:  .LBB48_2: # %atomicrmw.start
@@ -4049,10 +4049,10 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB48_1
 ; RV64I-NEXT:  .LBB48_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -4088,10 +4088,10 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -4104,7 +4104,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB49_4
 ; RV32I-NEXT:  .LBB49_2: # %atomicrmw.start
@@ -4118,10 +4118,10 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB49_1
 ; RV32I-NEXT:  .LBB49_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -4153,10 +4153,10 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -4169,7 +4169,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB49_4
 ; RV64I-NEXT:  .LBB49_2: # %atomicrmw.start
@@ -4183,10 +4183,10 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB49_1
 ; RV64I-NEXT:  .LBB49_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -4222,10 +4222,10 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i8_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -4238,7 +4238,7 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB50_4
 ; RV32I-NEXT:  .LBB50_2: # %atomicrmw.start
@@ -4252,10 +4252,10 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB50_1
 ; RV32I-NEXT:  .LBB50_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -4287,10 +4287,10 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i8_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -4303,7 +4303,7 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB50_4
 ; RV64I-NEXT:  .LBB50_2: # %atomicrmw.start
@@ -4317,10 +4317,10 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB50_1
 ; RV64I-NEXT:  .LBB50_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -4356,10 +4356,10 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i8_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -4372,7 +4372,7 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB51_4
 ; RV32I-NEXT:  .LBB51_2: # %atomicrmw.start
@@ -4386,10 +4386,10 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB51_1
 ; RV32I-NEXT:  .LBB51_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -4421,10 +4421,10 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i8_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -4437,7 +4437,7 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB51_4
 ; RV64I-NEXT:  .LBB51_2: # %atomicrmw.start
@@ -4451,10 +4451,10 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB51_1
 ; RV64I-NEXT:  .LBB51_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -4490,10 +4490,10 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i8_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -4506,7 +4506,7 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB52_4
 ; RV32I-NEXT:  .LBB52_2: # %atomicrmw.start
@@ -4520,10 +4520,10 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB52_1
 ; RV32I-NEXT:  .LBB52_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -4555,10 +4555,10 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i8_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -4571,7 +4571,7 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB52_4
 ; RV64I-NEXT:  .LBB52_2: # %atomicrmw.start
@@ -4585,10 +4585,10 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB52_1
 ; RV64I-NEXT:  .LBB52_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -4624,10 +4624,10 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i8_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -4640,7 +4640,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB53_4
 ; RV32I-NEXT:  .LBB53_2: # %atomicrmw.start
@@ -4654,10 +4654,10 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB53_1
 ; RV32I-NEXT:  .LBB53_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -4689,10 +4689,10 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i8_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -4705,7 +4705,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB53_4
 ; RV64I-NEXT:  .LBB53_2: # %atomicrmw.start
@@ -4719,10 +4719,10 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB53_1
 ; RV64I-NEXT:  .LBB53_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -4758,10 +4758,10 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i8_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -4774,7 +4774,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB54_4
 ; RV32I-NEXT:  .LBB54_2: # %atomicrmw.start
@@ -4788,10 +4788,10 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    j .LBB54_1
 ; RV32I-NEXT:  .LBB54_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -4823,10 +4823,10 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i8_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -4839,7 +4839,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB54_4
 ; RV64I-NEXT:  .LBB54_2: # %atomicrmw.start
@@ -4853,10 +4853,10 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    j .LBB54_1
 ; RV64I-NEXT:  .LBB54_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -4892,10 +4892,10 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_exchange_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -4924,10 +4924,10 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_exchange_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -4960,10 +4960,10 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_exchange_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -4992,10 +4992,10 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5028,10 +5028,10 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_exchange_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5060,10 +5060,10 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5096,10 +5096,10 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_exchange_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5128,10 +5128,10 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5164,10 +5164,10 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_exchange_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5196,10 +5196,10 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5232,10 +5232,10 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_add_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5264,10 +5264,10 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_add_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5300,10 +5300,10 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_add_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5332,10 +5332,10 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5368,10 +5368,10 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_add_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5400,10 +5400,10 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5436,10 +5436,10 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_add_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5468,10 +5468,10 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5504,10 +5504,10 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_add_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5536,10 +5536,10 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5572,10 +5572,10 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_sub_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5604,10 +5604,10 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_sub_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5640,10 +5640,10 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5672,10 +5672,10 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5708,10 +5708,10 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_2 at plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5740,10 +5740,10 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_2 at plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5776,10 +5776,10 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5808,10 +5808,10 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5844,10 +5844,10 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5876,10 +5876,10 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5912,10 +5912,10 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_and_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5938,10 +5938,10 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_and_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -5968,10 +5968,10 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_and_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -5994,10 +5994,10 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_and_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6024,10 +6024,10 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_and_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6050,10 +6050,10 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_and_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6080,10 +6080,10 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_and_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6106,10 +6106,10 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_and_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6136,10 +6136,10 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_and_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6162,10 +6162,10 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_and_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6192,10 +6192,10 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_nand_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6225,10 +6225,10 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_nand_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6262,10 +6262,10 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6295,10 +6295,10 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6332,10 +6332,10 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6365,10 +6365,10 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6402,10 +6402,10 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6435,10 +6435,10 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6472,10 +6472,10 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6505,10 +6505,10 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6542,10 +6542,10 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_or_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6565,10 +6565,10 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_or_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6592,10 +6592,10 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_or_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6615,10 +6615,10 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_or_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6642,10 +6642,10 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_or_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6665,10 +6665,10 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_or_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6692,10 +6692,10 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_or_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6715,10 +6715,10 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_or_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6742,10 +6742,10 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_or_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6765,10 +6765,10 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_or_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6792,10 +6792,10 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_xor_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6815,10 +6815,10 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_xor_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6842,10 +6842,10 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6865,10 +6865,10 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6892,10 +6892,10 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6915,10 +6915,10 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6942,10 +6942,10 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -6965,10 +6965,10 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -6992,10 +6992,10 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -7015,10 +7015,10 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -7042,10 +7042,10 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -7059,7 +7059,7 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB90_4
 ; RV32I-NEXT:  .LBB90_2: # %atomicrmw.start
@@ -7074,10 +7074,10 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB90_1
 ; RV32I-NEXT:  .LBB90_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -7115,10 +7115,10 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -7132,7 +7132,7 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB90_4
 ; RV64I-NEXT:  .LBB90_2: # %atomicrmw.start
@@ -7147,10 +7147,10 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB90_1
 ; RV64I-NEXT:  .LBB90_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -7192,10 +7192,10 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -7209,7 +7209,7 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB91_4
 ; RV32I-NEXT:  .LBB91_2: # %atomicrmw.start
@@ -7224,10 +7224,10 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB91_1
 ; RV32I-NEXT:  .LBB91_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -7265,10 +7265,10 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -7282,7 +7282,7 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB91_4
 ; RV64I-NEXT:  .LBB91_2: # %atomicrmw.start
@@ -7297,10 +7297,10 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB91_1
 ; RV64I-NEXT:  .LBB91_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -7342,10 +7342,10 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -7359,7 +7359,7 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB92_4
 ; RV32I-NEXT:  .LBB92_2: # %atomicrmw.start
@@ -7374,10 +7374,10 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB92_1
 ; RV32I-NEXT:  .LBB92_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -7415,10 +7415,10 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -7432,7 +7432,7 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB92_4
 ; RV64I-NEXT:  .LBB92_2: # %atomicrmw.start
@@ -7447,10 +7447,10 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB92_1
 ; RV64I-NEXT:  .LBB92_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -7492,10 +7492,10 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -7509,7 +7509,7 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB93_4
 ; RV32I-NEXT:  .LBB93_2: # %atomicrmw.start
@@ -7524,10 +7524,10 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB93_1
 ; RV32I-NEXT:  .LBB93_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -7565,10 +7565,10 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -7582,7 +7582,7 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB93_4
 ; RV64I-NEXT:  .LBB93_2: # %atomicrmw.start
@@ -7597,10 +7597,10 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB93_1
 ; RV64I-NEXT:  .LBB93_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -7642,10 +7642,10 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -7659,7 +7659,7 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB94_4
 ; RV32I-NEXT:  .LBB94_2: # %atomicrmw.start
@@ -7674,10 +7674,10 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB94_1
 ; RV32I-NEXT:  .LBB94_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -7715,10 +7715,10 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -7732,7 +7732,7 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB94_4
 ; RV64I-NEXT:  .LBB94_2: # %atomicrmw.start
@@ -7747,10 +7747,10 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB94_1
 ; RV64I-NEXT:  .LBB94_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -7792,10 +7792,10 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -7809,7 +7809,7 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB95_4
 ; RV32I-NEXT:  .LBB95_2: # %atomicrmw.start
@@ -7824,10 +7824,10 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB95_1
 ; RV32I-NEXT:  .LBB95_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -7865,10 +7865,10 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -7882,7 +7882,7 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB95_4
 ; RV64I-NEXT:  .LBB95_2: # %atomicrmw.start
@@ -7897,10 +7897,10 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB95_1
 ; RV64I-NEXT:  .LBB95_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -7942,10 +7942,10 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -7959,7 +7959,7 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB96_4
 ; RV32I-NEXT:  .LBB96_2: # %atomicrmw.start
@@ -7974,10 +7974,10 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB96_1
 ; RV32I-NEXT:  .LBB96_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -8015,10 +8015,10 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -8032,7 +8032,7 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB96_4
 ; RV64I-NEXT:  .LBB96_2: # %atomicrmw.start
@@ -8047,10 +8047,10 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB96_1
 ; RV64I-NEXT:  .LBB96_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -8092,10 +8092,10 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -8109,7 +8109,7 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB97_4
 ; RV32I-NEXT:  .LBB97_2: # %atomicrmw.start
@@ -8124,10 +8124,10 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB97_1
 ; RV32I-NEXT:  .LBB97_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -8165,10 +8165,10 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -8182,7 +8182,7 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB97_4
 ; RV64I-NEXT:  .LBB97_2: # %atomicrmw.start
@@ -8197,10 +8197,10 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB97_1
 ; RV64I-NEXT:  .LBB97_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -8242,10 +8242,10 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -8259,7 +8259,7 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB98_4
 ; RV32I-NEXT:  .LBB98_2: # %atomicrmw.start
@@ -8274,10 +8274,10 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB98_1
 ; RV32I-NEXT:  .LBB98_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -8315,10 +8315,10 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -8332,7 +8332,7 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB98_4
 ; RV64I-NEXT:  .LBB98_2: # %atomicrmw.start
@@ -8347,10 +8347,10 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB98_1
 ; RV64I-NEXT:  .LBB98_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -8392,10 +8392,10 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lhu a3, 0(a0)
 ; RV32I-NEXT:    mv s2, a1
@@ -8409,7 +8409,7 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB99_4
 ; RV32I-NEXT:  .LBB99_2: # %atomicrmw.start
@@ -8424,10 +8424,10 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB99_1
 ; RV32I-NEXT:  .LBB99_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -8465,10 +8465,10 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lhu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -8482,7 +8482,7 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB99_4
 ; RV64I-NEXT:  .LBB99_2: # %atomicrmw.start
@@ -8497,10 +8497,10 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB99_1
 ; RV64I-NEXT:  .LBB99_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -8542,11 +8542,11 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -8561,7 +8561,7 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB100_4
 ; RV32I-NEXT:  .LBB100_2: # %atomicrmw.start
@@ -8575,11 +8575,11 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB100_1
 ; RV32I-NEXT:  .LBB100_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -8612,11 +8612,11 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -8631,7 +8631,7 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB100_4
 ; RV64I-NEXT:  .LBB100_2: # %atomicrmw.start
@@ -8645,11 +8645,11 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB100_1
 ; RV64I-NEXT:  .LBB100_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -8686,11 +8686,11 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -8705,7 +8705,7 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB101_4
 ; RV32I-NEXT:  .LBB101_2: # %atomicrmw.start
@@ -8719,11 +8719,11 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB101_1
 ; RV32I-NEXT:  .LBB101_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -8756,11 +8756,11 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -8775,7 +8775,7 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB101_4
 ; RV64I-NEXT:  .LBB101_2: # %atomicrmw.start
@@ -8789,11 +8789,11 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB101_1
 ; RV64I-NEXT:  .LBB101_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -8830,11 +8830,11 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -8849,7 +8849,7 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB102_4
 ; RV32I-NEXT:  .LBB102_2: # %atomicrmw.start
@@ -8863,11 +8863,11 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB102_1
 ; RV32I-NEXT:  .LBB102_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -8900,11 +8900,11 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -8919,7 +8919,7 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB102_4
 ; RV64I-NEXT:  .LBB102_2: # %atomicrmw.start
@@ -8933,11 +8933,11 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB102_1
 ; RV64I-NEXT:  .LBB102_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -8974,11 +8974,11 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -8993,7 +8993,7 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB103_4
 ; RV32I-NEXT:  .LBB103_2: # %atomicrmw.start
@@ -9007,11 +9007,11 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB103_1
 ; RV32I-NEXT:  .LBB103_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -9044,11 +9044,11 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -9063,7 +9063,7 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB103_4
 ; RV64I-NEXT:  .LBB103_2: # %atomicrmw.start
@@ -9077,11 +9077,11 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB103_1
 ; RV64I-NEXT:  .LBB103_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -9118,11 +9118,11 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -9137,7 +9137,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB104_4
 ; RV32I-NEXT:  .LBB104_2: # %atomicrmw.start
@@ -9151,11 +9151,11 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB104_1
 ; RV32I-NEXT:  .LBB104_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -9188,11 +9188,11 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -9207,7 +9207,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB104_4
 ; RV64I-NEXT:  .LBB104_2: # %atomicrmw.start
@@ -9221,11 +9221,11 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB104_1
 ; RV64I-NEXT:  .LBB104_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -9262,11 +9262,11 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -9281,7 +9281,7 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB105_4
 ; RV32I-NEXT:  .LBB105_2: # %atomicrmw.start
@@ -9295,11 +9295,11 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB105_1
 ; RV32I-NEXT:  .LBB105_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -9332,11 +9332,11 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -9351,7 +9351,7 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB105_4
 ; RV64I-NEXT:  .LBB105_2: # %atomicrmw.start
@@ -9365,11 +9365,11 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB105_1
 ; RV64I-NEXT:  .LBB105_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -9406,11 +9406,11 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -9425,7 +9425,7 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB106_4
 ; RV32I-NEXT:  .LBB106_2: # %atomicrmw.start
@@ -9439,11 +9439,11 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB106_1
 ; RV32I-NEXT:  .LBB106_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -9476,11 +9476,11 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -9495,7 +9495,7 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB106_4
 ; RV64I-NEXT:  .LBB106_2: # %atomicrmw.start
@@ -9509,11 +9509,11 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB106_1
 ; RV64I-NEXT:  .LBB106_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -9550,11 +9550,11 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -9569,7 +9569,7 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB107_4
 ; RV32I-NEXT:  .LBB107_2: # %atomicrmw.start
@@ -9583,11 +9583,11 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB107_1
 ; RV32I-NEXT:  .LBB107_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -9620,11 +9620,11 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -9639,7 +9639,7 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB107_4
 ; RV64I-NEXT:  .LBB107_2: # %atomicrmw.start
@@ -9653,11 +9653,11 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB107_1
 ; RV64I-NEXT:  .LBB107_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -9694,11 +9694,11 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -9713,7 +9713,7 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB108_4
 ; RV32I-NEXT:  .LBB108_2: # %atomicrmw.start
@@ -9727,11 +9727,11 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB108_1
 ; RV32I-NEXT:  .LBB108_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -9764,11 +9764,11 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -9783,7 +9783,7 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB108_4
 ; RV64I-NEXT:  .LBB108_2: # %atomicrmw.start
@@ -9797,11 +9797,11 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB108_1
 ; RV64I-NEXT:  .LBB108_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -9838,11 +9838,11 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    lhu a1, 0(a0)
@@ -9857,7 +9857,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB109_4
 ; RV32I-NEXT:  .LBB109_2: # %atomicrmw.start
@@ -9871,11 +9871,11 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    j .LBB109_1
 ; RV32I-NEXT:  .LBB109_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -9908,11 +9908,11 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    lhu a1, 0(a0)
@@ -9927,7 +9927,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB109_4
 ; RV64I-NEXT:  .LBB109_2: # %atomicrmw.start
@@ -9941,11 +9941,11 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    j .LBB109_1
 ; RV64I-NEXT:  .LBB109_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -9982,10 +9982,10 @@ define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -9997,10 +9997,10 @@ define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10016,10 +10016,10 @@ define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10031,10 +10031,10 @@ define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10050,10 +10050,10 @@ define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10065,10 +10065,10 @@ define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10084,10 +10084,10 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10099,10 +10099,10 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10118,10 +10118,10 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_exchange_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10133,10 +10133,10 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xchg_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_exchange_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10152,10 +10152,10 @@ define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_add_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10167,10 +10167,10 @@ define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_add_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10186,10 +10186,10 @@ define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_add_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10201,10 +10201,10 @@ define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_add_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10220,10 +10220,10 @@ define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_add_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10235,10 +10235,10 @@ define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_add_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10254,10 +10254,10 @@ define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_add_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10269,10 +10269,10 @@ define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_add_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10288,10 +10288,10 @@ define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_add_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10303,10 +10303,10 @@ define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_add_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_add_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10322,10 +10322,10 @@ define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_sub_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10338,10 +10338,10 @@ define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_sub_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10358,10 +10358,10 @@ define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_sub_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10374,10 +10374,10 @@ define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_sub_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10394,10 +10394,10 @@ define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_sub_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10410,10 +10410,10 @@ define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_sub_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10430,10 +10430,10 @@ define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10446,10 +10446,10 @@ define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10466,10 +10466,10 @@ define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10482,10 +10482,10 @@ define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_sub_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10502,10 +10502,10 @@ define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_and_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10517,10 +10517,10 @@ define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_and_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10536,10 +10536,10 @@ define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_and_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10551,10 +10551,10 @@ define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_and_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10570,10 +10570,10 @@ define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_and_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10585,10 +10585,10 @@ define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_and_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10604,10 +10604,10 @@ define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_and_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10619,10 +10619,10 @@ define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_and_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10638,10 +10638,10 @@ define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_and_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10653,10 +10653,10 @@ define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_and_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_and_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10672,10 +10672,10 @@ define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_nand_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10694,10 +10694,10 @@ define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_nand_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10720,10 +10720,10 @@ define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10742,10 +10742,10 @@ define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10768,10 +10768,10 @@ define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10790,10 +10790,10 @@ define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10816,10 +10816,10 @@ define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10838,10 +10838,10 @@ define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10864,10 +10864,10 @@ define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10886,10 +10886,10 @@ define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_nand_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10912,10 +10912,10 @@ define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_or_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10927,10 +10927,10 @@ define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_or_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10946,10 +10946,10 @@ define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_or_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10961,10 +10961,10 @@ define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_or_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -10980,10 +10980,10 @@ define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_or_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -10995,10 +10995,10 @@ define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_or_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11014,10 +11014,10 @@ define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_or_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11029,10 +11029,10 @@ define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_or_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11048,10 +11048,10 @@ define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_or_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11063,10 +11063,10 @@ define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_or_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_or_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11082,10 +11082,10 @@ define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    call __atomic_fetch_xor_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11097,10 +11097,10 @@ define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_xor_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11116,10 +11116,10 @@ define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11131,10 +11131,10 @@ define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11150,10 +11150,10 @@ define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11165,10 +11165,10 @@ define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11184,10 +11184,10 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11199,10 +11199,10 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11218,10 +11218,10 @@ define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_4
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11233,10 +11233,10 @@ define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_xor_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_4
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -11252,9 +11252,9 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11266,7 +11266,7 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB145_4
 ; RV32I-NEXT:  .LBB145_2: # %atomicrmw.start
@@ -11279,9 +11279,9 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB145_1
 ; RV32I-NEXT:  .LBB145_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11293,10 +11293,10 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11309,7 +11309,7 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB145_4
 ; RV64I-NEXT:  .LBB145_2: # %atomicrmw.start
@@ -11323,10 +11323,10 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB145_1
 ; RV64I-NEXT:  .LBB145_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11342,9 +11342,9 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11356,7 +11356,7 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB146_4
 ; RV32I-NEXT:  .LBB146_2: # %atomicrmw.start
@@ -11369,9 +11369,9 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB146_1
 ; RV32I-NEXT:  .LBB146_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11383,10 +11383,10 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11399,7 +11399,7 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB146_4
 ; RV64I-NEXT:  .LBB146_2: # %atomicrmw.start
@@ -11413,10 +11413,10 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB146_1
 ; RV64I-NEXT:  .LBB146_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11432,9 +11432,9 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11446,7 +11446,7 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB147_4
 ; RV32I-NEXT:  .LBB147_2: # %atomicrmw.start
@@ -11459,9 +11459,9 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB147_1
 ; RV32I-NEXT:  .LBB147_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11473,10 +11473,10 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11489,7 +11489,7 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB147_4
 ; RV64I-NEXT:  .LBB147_2: # %atomicrmw.start
@@ -11503,10 +11503,10 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB147_1
 ; RV64I-NEXT:  .LBB147_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11522,9 +11522,9 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11536,7 +11536,7 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB148_4
 ; RV32I-NEXT:  .LBB148_2: # %atomicrmw.start
@@ -11549,9 +11549,9 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB148_1
 ; RV32I-NEXT:  .LBB148_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11563,10 +11563,10 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11579,7 +11579,7 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB148_4
 ; RV64I-NEXT:  .LBB148_2: # %atomicrmw.start
@@ -11593,10 +11593,10 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB148_1
 ; RV64I-NEXT:  .LBB148_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11612,9 +11612,9 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11626,7 +11626,7 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB149_4
 ; RV32I-NEXT:  .LBB149_2: # %atomicrmw.start
@@ -11639,9 +11639,9 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB149_1
 ; RV32I-NEXT:  .LBB149_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11653,10 +11653,10 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_max_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11669,7 +11669,7 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB149_4
 ; RV64I-NEXT:  .LBB149_2: # %atomicrmw.start
@@ -11683,10 +11683,10 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB149_1
 ; RV64I-NEXT:  .LBB149_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11702,9 +11702,9 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11716,7 +11716,7 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB150_4
 ; RV32I-NEXT:  .LBB150_2: # %atomicrmw.start
@@ -11729,9 +11729,9 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB150_1
 ; RV32I-NEXT:  .LBB150_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11743,10 +11743,10 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11759,7 +11759,7 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB150_4
 ; RV64I-NEXT:  .LBB150_2: # %atomicrmw.start
@@ -11773,10 +11773,10 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB150_1
 ; RV64I-NEXT:  .LBB150_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11792,9 +11792,9 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11806,7 +11806,7 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB151_4
 ; RV32I-NEXT:  .LBB151_2: # %atomicrmw.start
@@ -11819,9 +11819,9 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB151_1
 ; RV32I-NEXT:  .LBB151_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11833,10 +11833,10 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11849,7 +11849,7 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB151_4
 ; RV64I-NEXT:  .LBB151_2: # %atomicrmw.start
@@ -11863,10 +11863,10 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB151_1
 ; RV64I-NEXT:  .LBB151_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11882,9 +11882,9 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11896,7 +11896,7 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB152_4
 ; RV32I-NEXT:  .LBB152_2: # %atomicrmw.start
@@ -11909,9 +11909,9 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB152_1
 ; RV32I-NEXT:  .LBB152_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -11923,10 +11923,10 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -11939,7 +11939,7 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB152_4
 ; RV64I-NEXT:  .LBB152_2: # %atomicrmw.start
@@ -11953,10 +11953,10 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB152_1
 ; RV64I-NEXT:  .LBB152_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -11972,9 +11972,9 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -11986,7 +11986,7 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB153_4
 ; RV32I-NEXT:  .LBB153_2: # %atomicrmw.start
@@ -11999,9 +11999,9 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB153_1
 ; RV32I-NEXT:  .LBB153_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12013,10 +12013,10 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12029,7 +12029,7 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB153_4
 ; RV64I-NEXT:  .LBB153_2: # %atomicrmw.start
@@ -12043,10 +12043,10 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB153_1
 ; RV64I-NEXT:  .LBB153_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12062,9 +12062,9 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12076,7 +12076,7 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB154_4
 ; RV32I-NEXT:  .LBB154_2: # %atomicrmw.start
@@ -12089,9 +12089,9 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB154_1
 ; RV32I-NEXT:  .LBB154_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12103,10 +12103,10 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_min_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12119,7 +12119,7 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB154_4
 ; RV64I-NEXT:  .LBB154_2: # %atomicrmw.start
@@ -12133,10 +12133,10 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB154_1
 ; RV64I-NEXT:  .LBB154_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12152,9 +12152,9 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12166,7 +12166,7 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB155_4
 ; RV32I-NEXT:  .LBB155_2: # %atomicrmw.start
@@ -12179,9 +12179,9 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB155_1
 ; RV32I-NEXT:  .LBB155_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12193,10 +12193,10 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12209,7 +12209,7 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB155_4
 ; RV64I-NEXT:  .LBB155_2: # %atomicrmw.start
@@ -12223,10 +12223,10 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB155_1
 ; RV64I-NEXT:  .LBB155_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12242,9 +12242,9 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12256,7 +12256,7 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB156_4
 ; RV32I-NEXT:  .LBB156_2: # %atomicrmw.start
@@ -12269,9 +12269,9 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB156_1
 ; RV32I-NEXT:  .LBB156_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12283,10 +12283,10 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12299,7 +12299,7 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB156_4
 ; RV64I-NEXT:  .LBB156_2: # %atomicrmw.start
@@ -12313,10 +12313,10 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB156_1
 ; RV64I-NEXT:  .LBB156_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12332,9 +12332,9 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12346,7 +12346,7 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB157_4
 ; RV32I-NEXT:  .LBB157_2: # %atomicrmw.start
@@ -12359,9 +12359,9 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB157_1
 ; RV32I-NEXT:  .LBB157_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12373,10 +12373,10 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12389,7 +12389,7 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB157_4
 ; RV64I-NEXT:  .LBB157_2: # %atomicrmw.start
@@ -12403,10 +12403,10 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB157_1
 ; RV64I-NEXT:  .LBB157_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12422,9 +12422,9 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12436,7 +12436,7 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB158_4
 ; RV32I-NEXT:  .LBB158_2: # %atomicrmw.start
@@ -12449,9 +12449,9 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB158_1
 ; RV32I-NEXT:  .LBB158_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12463,10 +12463,10 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12479,7 +12479,7 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB158_4
 ; RV64I-NEXT:  .LBB158_2: # %atomicrmw.start
@@ -12493,10 +12493,10 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB158_1
 ; RV64I-NEXT:  .LBB158_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12512,9 +12512,9 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12526,7 +12526,7 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB159_4
 ; RV32I-NEXT:  .LBB159_2: # %atomicrmw.start
@@ -12539,9 +12539,9 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB159_1
 ; RV32I-NEXT:  .LBB159_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12553,10 +12553,10 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umax_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12569,7 +12569,7 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB159_4
 ; RV64I-NEXT:  .LBB159_2: # %atomicrmw.start
@@ -12583,10 +12583,10 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB159_1
 ; RV64I-NEXT:  .LBB159_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12602,9 +12602,9 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i32_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12616,7 +12616,7 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB160_4
 ; RV32I-NEXT:  .LBB160_2: # %atomicrmw.start
@@ -12629,9 +12629,9 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB160_1
 ; RV32I-NEXT:  .LBB160_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12643,10 +12643,10 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i32_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12659,7 +12659,7 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB160_4
 ; RV64I-NEXT:  .LBB160_2: # %atomicrmw.start
@@ -12673,10 +12673,10 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB160_1
 ; RV64I-NEXT:  .LBB160_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12692,9 +12692,9 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i32_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12706,7 +12706,7 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB161_4
 ; RV32I-NEXT:  .LBB161_2: # %atomicrmw.start
@@ -12719,9 +12719,9 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB161_1
 ; RV32I-NEXT:  .LBB161_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12733,10 +12733,10 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i32_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12749,7 +12749,7 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB161_4
 ; RV64I-NEXT:  .LBB161_2: # %atomicrmw.start
@@ -12763,10 +12763,10 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB161_1
 ; RV64I-NEXT:  .LBB161_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12782,9 +12782,9 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i32_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12796,7 +12796,7 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB162_4
 ; RV32I-NEXT:  .LBB162_2: # %atomicrmw.start
@@ -12809,9 +12809,9 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB162_1
 ; RV32I-NEXT:  .LBB162_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12823,10 +12823,10 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i32_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12839,7 +12839,7 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB162_4
 ; RV64I-NEXT:  .LBB162_2: # %atomicrmw.start
@@ -12853,10 +12853,10 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB162_1
 ; RV64I-NEXT:  .LBB162_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12872,9 +12872,9 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i32_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12886,7 +12886,7 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB163_4
 ; RV32I-NEXT:  .LBB163_2: # %atomicrmw.start
@@ -12899,9 +12899,9 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB163_1
 ; RV32I-NEXT:  .LBB163_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -12913,10 +12913,10 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i32_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -12929,7 +12929,7 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB163_4
 ; RV64I-NEXT:  .LBB163_2: # %atomicrmw.start
@@ -12943,10 +12943,10 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB163_1
 ; RV64I-NEXT:  .LBB163_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -12962,9 +12962,9 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i32_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
@@ -12976,7 +12976,7 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB164_4
 ; RV32I-NEXT:  .LBB164_2: # %atomicrmw.start
@@ -12989,9 +12989,9 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    j .LBB164_1
 ; RV32I-NEXT:  .LBB164_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -13003,10 +13003,10 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-LABEL: atomicrmw_umin_i32_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lwu a3, 0(a0)
 ; RV64I-NEXT:    mv s2, a1
@@ -13019,7 +13019,7 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB164_4
 ; RV64I-NEXT:  .LBB164_2: # %atomicrmw.start
@@ -13033,10 +13033,10 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    j .LBB164_1
 ; RV64I-NEXT:  .LBB164_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -13052,30 +13052,30 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xchg_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xchg_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13091,30 +13091,30 @@ define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    call __atomic_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xchg_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 2
-; RV32IA-NEXT:    call __atomic_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xchg_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13130,30 +13130,30 @@ define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xchg_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xchg_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13169,30 +13169,30 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    call __atomic_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xchg_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 4
-; RV32IA-NEXT:    call __atomic_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xchg_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13208,30 +13208,30 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xchg_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_exchange_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_exchange_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xchg_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_exchange_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_exchange_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xchg_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_exchange_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_exchange_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13247,30 +13247,30 @@ define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_fetch_add_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_add_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_fetch_add_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_add_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_add_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13286,30 +13286,30 @@ define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_add_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_add_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 2
-; RV32IA-NEXT:    call __atomic_fetch_add_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_add_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_add_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13325,30 +13325,30 @@ define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_add_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_add_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_fetch_add_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_add_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_add_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13364,30 +13364,30 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_add_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_add_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 4
-; RV32IA-NEXT:    call __atomic_fetch_add_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_add_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_add_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13403,30 +13403,30 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_add_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_add_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_add_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_fetch_add_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_add_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_add_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_add_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13442,30 +13442,30 @@ define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_fetch_sub_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_sub_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_fetch_sub_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_sub_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_sub_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13482,30 +13482,30 @@ define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_sub_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_sub_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 2
-; RV32IA-NEXT:    call __atomic_fetch_sub_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_sub_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_sub_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13522,30 +13522,30 @@ define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_sub_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_sub_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_fetch_sub_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_sub_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_sub_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13562,30 +13562,30 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_sub_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_sub_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 4
-; RV32IA-NEXT:    call __atomic_fetch_sub_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_sub_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_sub_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13602,30 +13602,30 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_sub_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_sub_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_sub_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_fetch_sub_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_sub_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_sub_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13642,30 +13642,30 @@ define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_fetch_and_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_and_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_fetch_and_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_and_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_and_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13681,30 +13681,30 @@ define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_and_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_and_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 2
-; RV32IA-NEXT:    call __atomic_fetch_and_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_and_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_and_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13720,30 +13720,30 @@ define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_and_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_and_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_fetch_and_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_and_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_and_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13759,30 +13759,30 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_and_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_and_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 4
-; RV32IA-NEXT:    call __atomic_fetch_and_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_and_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_and_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13798,30 +13798,30 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_and_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_and_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_and_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_and_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_fetch_and_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_and_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_and_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_and_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13837,30 +13837,30 @@ define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_fetch_nand_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_fetch_nand_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_nand_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_nand_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13883,30 +13883,30 @@ define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_nand_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 2
-; RV32IA-NEXT:    call __atomic_fetch_nand_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_nand_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_nand_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13929,30 +13929,30 @@ define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_nand_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_fetch_nand_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_nand_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_nand_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -13975,30 +13975,30 @@ define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_nand_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 4
-; RV32IA-NEXT:    call __atomic_fetch_nand_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_nand_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_nand_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14021,30 +14021,30 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_nand_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_nand_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_fetch_nand_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_nand_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_nand_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14067,30 +14067,30 @@ define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_fetch_or_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_or_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_fetch_or_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_or_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_or_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14106,30 +14106,30 @@ define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_or_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_or_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 2
-; RV32IA-NEXT:    call __atomic_fetch_or_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_or_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_or_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14145,30 +14145,30 @@ define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_or_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_or_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_fetch_or_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_or_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_or_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14184,30 +14184,30 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_or_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_or_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 4
-; RV32IA-NEXT:    call __atomic_fetch_or_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_or_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_or_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14223,30 +14223,30 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_or_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_or_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_or_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_or_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_fetch_or_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_or_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_or_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_or_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14262,30 +14262,30 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __atomic_fetch_xor_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xor_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv a3, zero
-; RV32IA-NEXT:    call __atomic_fetch_xor_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xor_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, zero
-; RV64I-NEXT:    call __atomic_fetch_xor_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14301,30 +14301,30 @@ define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    call __atomic_fetch_xor_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xor_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 2
-; RV32IA-NEXT:    call __atomic_fetch_xor_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xor_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 2
-; RV64I-NEXT:    call __atomic_fetch_xor_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14340,30 +14340,30 @@ define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    call __atomic_fetch_xor_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xor_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 3
-; RV32IA-NEXT:    call __atomic_fetch_xor_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xor_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    call __atomic_fetch_xor_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14379,30 +14379,30 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    call __atomic_fetch_xor_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xor_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 4
-; RV32IA-NEXT:    call __atomic_fetch_xor_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xor_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 4
-; RV64I-NEXT:    call __atomic_fetch_xor_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14418,30 +14418,30 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_xor_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    call __atomic_fetch_xor_8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_xor_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
-; RV32IA-NEXT:    sw ra, 12(sp)
+; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    addi a3, zero, 5
-; RV32IA-NEXT:    call __atomic_fetch_xor_8
-; RV32IA-NEXT:    lw ra, 12(sp)
+; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_xor_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a2, zero, 5
-; RV64I-NEXT:    call __atomic_fetch_xor_8
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -14457,10 +14457,10 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -14475,7 +14475,7 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB200_7
@@ -14501,20 +14501,20 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB200_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_max_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -14529,7 +14529,7 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a4, zero
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB200_7
@@ -14555,19 +14555,19 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB200_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -14579,7 +14579,7 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB200_4
 ; RV64I-NEXT:  .LBB200_2: # %atomicrmw.start
@@ -14592,9 +14592,9 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB200_1
 ; RV64I-NEXT:  .LBB200_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -14610,10 +14610,10 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -14628,7 +14628,7 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB201_7
@@ -14654,20 +14654,20 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB201_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_max_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -14682,7 +14682,7 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 2
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB201_7
@@ -14708,19 +14708,19 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB201_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -14732,7 +14732,7 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB201_4
 ; RV64I-NEXT:  .LBB201_2: # %atomicrmw.start
@@ -14745,9 +14745,9 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB201_1
 ; RV64I-NEXT:  .LBB201_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -14763,10 +14763,10 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -14781,7 +14781,7 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB202_7
@@ -14807,20 +14807,20 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB202_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_max_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -14835,7 +14835,7 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB202_7
@@ -14861,19 +14861,19 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB202_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -14885,7 +14885,7 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB202_4
 ; RV64I-NEXT:  .LBB202_2: # %atomicrmw.start
@@ -14898,9 +14898,9 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB202_1
 ; RV64I-NEXT:  .LBB202_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -14916,10 +14916,10 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -14934,7 +14934,7 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 4
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB203_7
@@ -14960,20 +14960,20 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB203_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_max_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -14988,7 +14988,7 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 4
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB203_7
@@ -15014,19 +15014,19 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB203_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -15038,7 +15038,7 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB203_4
 ; RV64I-NEXT:  .LBB203_2: # %atomicrmw.start
@@ -15051,9 +15051,9 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB203_1
 ; RV64I-NEXT:  .LBB203_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -15069,10 +15069,10 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_max_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -15087,7 +15087,7 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    addi a5, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB204_7
@@ -15113,20 +15113,20 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB204_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_max_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -15141,7 +15141,7 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 5
 ; RV32IA-NEXT:    addi a5, zero, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB204_7
@@ -15167,19 +15167,19 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB204_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_max_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -15191,7 +15191,7 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB204_4
 ; RV64I-NEXT:  .LBB204_2: # %atomicrmw.start
@@ -15204,9 +15204,9 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB204_1
 ; RV64I-NEXT:  .LBB204_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -15222,10 +15222,10 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -15240,7 +15240,7 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB205_7
@@ -15267,20 +15267,20 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB205_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_min_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -15295,7 +15295,7 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a4, zero
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB205_7
@@ -15322,19 +15322,19 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB205_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -15346,7 +15346,7 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB205_4
 ; RV64I-NEXT:  .LBB205_2: # %atomicrmw.start
@@ -15359,9 +15359,9 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB205_1
 ; RV64I-NEXT:  .LBB205_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -15377,10 +15377,10 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -15395,7 +15395,7 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB206_7
@@ -15422,20 +15422,20 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB206_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_min_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -15450,7 +15450,7 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 2
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB206_7
@@ -15477,19 +15477,19 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB206_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -15501,7 +15501,7 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB206_4
 ; RV64I-NEXT:  .LBB206_2: # %atomicrmw.start
@@ -15514,9 +15514,9 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB206_1
 ; RV64I-NEXT:  .LBB206_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -15532,10 +15532,10 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -15550,7 +15550,7 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB207_7
@@ -15577,20 +15577,20 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB207_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_min_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -15605,7 +15605,7 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB207_7
@@ -15632,19 +15632,19 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB207_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -15656,7 +15656,7 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB207_4
 ; RV64I-NEXT:  .LBB207_2: # %atomicrmw.start
@@ -15669,9 +15669,9 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB207_1
 ; RV64I-NEXT:  .LBB207_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -15687,10 +15687,10 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -15705,7 +15705,7 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 4
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB208_7
@@ -15732,20 +15732,20 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB208_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_min_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -15760,7 +15760,7 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 4
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB208_7
@@ -15787,19 +15787,19 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB208_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -15811,7 +15811,7 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB208_4
 ; RV64I-NEXT:  .LBB208_2: # %atomicrmw.start
@@ -15824,9 +15824,9 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB208_1
 ; RV64I-NEXT:  .LBB208_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -15842,10 +15842,10 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_min_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -15860,7 +15860,7 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    addi a5, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB209_7
@@ -15887,20 +15887,20 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB209_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_min_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -15915,7 +15915,7 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 5
 ; RV32IA-NEXT:    addi a5, zero, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB209_7
@@ -15942,19 +15942,19 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB209_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_min_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -15966,7 +15966,7 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB209_4
 ; RV64I-NEXT:  .LBB209_2: # %atomicrmw.start
@@ -15979,9 +15979,9 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB209_1
 ; RV64I-NEXT:  .LBB209_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -15997,10 +15997,10 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -16015,7 +16015,7 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB210_7
@@ -16041,20 +16041,20 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB210_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umax_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -16069,7 +16069,7 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a4, zero
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB210_7
@@ -16095,19 +16095,19 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB210_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umax_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -16119,7 +16119,7 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB210_4
 ; RV64I-NEXT:  .LBB210_2: # %atomicrmw.start
@@ -16132,9 +16132,9 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB210_1
 ; RV64I-NEXT:  .LBB210_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -16150,10 +16150,10 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -16168,7 +16168,7 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB211_7
@@ -16194,20 +16194,20 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB211_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umax_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -16222,7 +16222,7 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 2
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB211_7
@@ -16248,19 +16248,19 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB211_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umax_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -16272,7 +16272,7 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB211_4
 ; RV64I-NEXT:  .LBB211_2: # %atomicrmw.start
@@ -16285,9 +16285,9 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB211_1
 ; RV64I-NEXT:  .LBB211_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -16303,10 +16303,10 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -16321,7 +16321,7 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB212_7
@@ -16347,20 +16347,20 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB212_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umax_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -16375,7 +16375,7 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB212_7
@@ -16401,19 +16401,19 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB212_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umax_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -16425,7 +16425,7 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB212_4
 ; RV64I-NEXT:  .LBB212_2: # %atomicrmw.start
@@ -16438,9 +16438,9 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB212_1
 ; RV64I-NEXT:  .LBB212_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -16456,10 +16456,10 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -16474,7 +16474,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 4
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB213_7
@@ -16500,20 +16500,20 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB213_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umax_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -16528,7 +16528,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 4
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB213_7
@@ -16554,19 +16554,19 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB213_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umax_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -16578,7 +16578,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB213_4
 ; RV64I-NEXT:  .LBB213_2: # %atomicrmw.start
@@ -16591,9 +16591,9 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB213_1
 ; RV64I-NEXT:  .LBB213_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -16609,10 +16609,10 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umax_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -16627,7 +16627,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    addi a5, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB214_7
@@ -16653,20 +16653,20 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB214_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umax_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -16681,7 +16681,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 5
 ; RV32IA-NEXT:    addi a5, zero, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB214_7
@@ -16707,19 +16707,19 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB214_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umax_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -16731,7 +16731,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB214_4
 ; RV64I-NEXT:  .LBB214_2: # %atomicrmw.start
@@ -16744,9 +16744,9 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB214_1
 ; RV64I-NEXT:  .LBB214_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -16762,10 +16762,10 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i64_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -16780,7 +16780,7 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a4, zero
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB215_7
@@ -16807,20 +16807,20 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB215_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umin_i64_monotonic:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -16835,7 +16835,7 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a4, zero
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB215_7
@@ -16862,19 +16862,19 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB215_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umin_i64_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -16886,7 +16886,7 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a3, zero
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB215_4
 ; RV64I-NEXT:  .LBB215_2: # %atomicrmw.start
@@ -16899,9 +16899,9 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB215_1
 ; RV64I-NEXT:  .LBB215_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -16917,10 +16917,10 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i64_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -16935,7 +16935,7 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 2
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB216_7
@@ -16962,20 +16962,20 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB216_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umin_i64_acquire:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -16990,7 +16990,7 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 2
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB216_7
@@ -17017,19 +17017,19 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB216_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umin_i64_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -17041,7 +17041,7 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB216_4
 ; RV64I-NEXT:  .LBB216_2: # %atomicrmw.start
@@ -17054,9 +17054,9 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB216_1
 ; RV64I-NEXT:  .LBB216_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -17072,10 +17072,10 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i64_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -17090,7 +17090,7 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 3
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB217_7
@@ -17117,20 +17117,20 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB217_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umin_i64_release:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -17145,7 +17145,7 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 3
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    mv a5, zero
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB217_7
@@ -17172,19 +17172,19 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB217_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umin_i64_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -17196,7 +17196,7 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a4, zero
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB217_4
 ; RV64I-NEXT:  .LBB217_2: # %atomicrmw.start
@@ -17209,9 +17209,9 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB217_1
 ; RV64I-NEXT:  .LBB217_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -17227,10 +17227,10 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i64_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -17245,7 +17245,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 4
 ; RV32I-NEXT:    addi a5, zero, 2
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB218_7
@@ -17272,20 +17272,20 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB218_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umin_i64_acq_rel:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -17300,7 +17300,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 4
 ; RV32IA-NEXT:    addi a5, zero, 2
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB218_7
@@ -17327,19 +17327,19 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB218_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umin_i64_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -17351,7 +17351,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB218_4
 ; RV64I-NEXT:  .LBB218_2: # %atomicrmw.start
@@ -17364,9 +17364,9 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB218_1
 ; RV64I-NEXT:  .LBB218_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -17382,10 +17382,10 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_umin_i64_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a5, 4(a0)
 ; RV32I-NEXT:    lw a4, 0(a0)
@@ -17400,7 +17400,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a4, zero, 5
 ; RV32I-NEXT:    addi a5, zero, 5
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
 ; RV32I-NEXT:    bnez a0, .LBB219_7
@@ -17427,20 +17427,20 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:  .LBB219_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: atomicrmw_umin_i64_seq_cst:
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp)
-; RV32IA-NEXT:    sw s0, 24(sp)
-; RV32IA-NEXT:    sw s1, 20(sp)
-; RV32IA-NEXT:    sw s2, 16(sp)
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32IA-NEXT:    mv s0, a0
 ; RV32IA-NEXT:    lw a5, 4(a0)
 ; RV32IA-NEXT:    lw a4, 0(a0)
@@ -17455,7 +17455,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    addi a4, zero, 5
 ; RV32IA-NEXT:    addi a5, zero, 5
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB219_7
@@ -17482,19 +17482,19 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:  .LBB219_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw s2, 16(sp)
-; RV32IA-NEXT:    lw s1, 20(sp)
-; RV32IA-NEXT:    lw s0, 24(sp)
-; RV32IA-NEXT:    lw ra, 28(sp)
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: atomicrmw_umin_i64_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
@@ -17506,7 +17506,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB219_4
 ; RV64I-NEXT:  .LBB219_2: # %atomicrmw.start
@@ -17519,9 +17519,9 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    j .LBB219_1
 ; RV64I-NEXT:  .LBB219_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
index d896d9fe071c..c53b79913b1c 100644
--- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -80,7 +80,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a1, a0, 255
 ; RV32I-NEXT:    beqz a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
@@ -105,13 +105,13 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    j .LBB3_3
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    addi a0, zero, 8
 ; RV32I-NEXT:  .LBB3_3: # %cond.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
@@ -122,7 +122,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a1, a0, a1
@@ -149,13 +149,13 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    j .LBB4_3
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    addi a0, zero, 16
 ; RV32I-NEXT:  .LBB4_3: # %cond.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
@@ -166,7 +166,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    beqz a0, .LBB5_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi a1, a0, -1
@@ -190,13 +190,13 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    j .LBB5_3
 ; RV32I-NEXT:  .LBB5_2:
 ; RV32I-NEXT:    addi a0, zero, 32
 ; RV32I-NEXT:  .LBB5_3: # %cond.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
@@ -207,7 +207,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_ctlz_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    beqz a0, .LBB6_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    srli a1, a0, 1
@@ -239,13 +239,13 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    j .LBB6_3
 ; RV32I-NEXT:  .LBB6_2:
 ; RV32I-NEXT:    addi a0, zero, 32
 ; RV32I-NEXT:  .LBB6_3: # %cond.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
@@ -256,14 +256,14 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
-; RV32I-NEXT:    sw s6, 0(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a0, a0, -1
@@ -288,7 +288,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    addi a0, s3, -1
 ; RV32I-NEXT:    not a1, s3
@@ -304,7 +304,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    bnez s4, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -314,14 +314,14 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB7_3:
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    lw s6, 0(sp)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
   %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
@@ -332,7 +332,7 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i8_zero_undef:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    and a0, a0, a1
@@ -354,9 +354,9 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
@@ -367,7 +367,7 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i16_zero_undef:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    and a0, a0, a1
@@ -389,9 +389,9 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
@@ -402,7 +402,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i32_zero_undef:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    and a0, a0, a1
@@ -424,9 +424,9 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
@@ -437,14 +437,14 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i64_zero_undef:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
-; RV32I-NEXT:    sw s6, 0(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a0, a0, -1
@@ -469,7 +469,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    addi a0, s3, -1
 ; RV32I-NEXT:    not a1, s3
@@ -485,7 +485,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    bnez s4, .LBB11_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -495,14 +495,14 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB11_3:
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    lw s6, 0(sp)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
   %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
@@ -513,7 +513,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_ctpop_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    srli a1, a0, 1
 ; RV32I-NEXT:    lui a2, 349525
 ; RV32I-NEXT:    addi a2, a2, 1365
@@ -532,9 +532,9 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call i32 @llvm.ctpop.i32(i32 %a)

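As an aside for reviewers, the RV32I check lines above spell out the
SWAR expansion used when no bit-manipulation extension is available:
the masks 0x55555555/0x33333333/0x0f0f0f0f reduce the value to per-byte
counts, and the final multiply by 0x01010101 (lui 4112; addi 257) is
emitted as the __mulsi3 libcall whose @plt suffix this regeneration
picks up. A minimal C sketch of the same sequence, illustrative only
and not part of the patch:

    #include <stdint.h>

    /* SWAR popcount matching the RV32I expansion in the checks above:
       pair counts, nibble counts, byte counts, then a multiply by
       0x01010101 that leaves the byte sum in bits 31:24. */
    static uint32_t ctpop32(uint32_t x) {
        x = x - ((x >> 1) & 0x55555555);
        x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
        x = (x + (x >> 4)) & 0x0f0f0f0f;
        return (x * 0x01010101u) >> 24;
    }

    /* The cttz tests reduce to the same popcount: the leading
       "addi a1, a0, -1; not a0, a0; and a0, a0, a1" computes
       ~x & (x - 1), whose set bits are exactly the trailing zeros
       of x (the generated code additionally branches to return the
       bit width for a zero input). */
    static uint32_t cttz32(uint32_t x) {
        return ctpop32(~x & (x - 1));
    }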
diff --git a/llvm/test/CodeGen/RISCV/byval.ll b/llvm/test/CodeGen/RISCV/byval.ll
index 99e6abf23b88..684d3a8aab96 100644
--- a/llvm/test/CodeGen/RISCV/byval.ll
+++ b/llvm/test/CodeGen/RISCV/byval.ll
@@ -21,7 +21,7 @@ define void @caller() nounwind {
 ; RV32I-LABEL: caller:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a0, %hi(foo)
 ; RV32I-NEXT:    lw a1, %lo(foo)(a0)
 ; RV32I-NEXT:    sw a1, 12(sp)
@@ -34,7 +34,7 @@ define void @caller() nounwind {
 ; RV32I-NEXT:    sw a0, 16(sp)
 ; RV32I-NEXT:    addi a0, sp, 12
 ; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 entry:

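The byval.ll change is annotation-only, but for orientation: the checks
show the by-value contract on RISC-V, where the caller materialises its
own copy of the aggregate in its frame (the sw stores to 12(sp) and up)
and passes that copy's address in a0 (addi a0, sp, 12). A rough C
equivalent, with the struct layout assumed for illustration rather than
taken from the test file:

    #include <stdint.h>

    struct Foo { int32_t a, b, c, d; };  /* layout assumed, not from the test */

    extern struct Foo foo;      /* the global loaded via %hi(foo)/%lo(foo) */
    void callee(struct Foo f);  /* by-value parameter: callee sees a copy */

    void caller(void) {
        /* The compiler copies foo into the caller's stack frame and
           passes a pointer to that copy, mirroring the sw/addi
           sequence in the checks above. */
        callee(foo);
    }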
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
index 2c5206d57c72..e213d1f3b594 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
@@ -164,18 +164,18 @@ define void @callee() nounwind {
 ; ILP32F-LABEL: callee:
 ; ILP32F:       # %bb.0:
 ; ILP32F-NEXT:    addi sp, sp, -48
-; ILP32F-NEXT:    fsw fs0, 44(sp)
-; ILP32F-NEXT:    fsw fs1, 40(sp)
-; ILP32F-NEXT:    fsw fs2, 36(sp)
-; ILP32F-NEXT:    fsw fs3, 32(sp)
-; ILP32F-NEXT:    fsw fs4, 28(sp)
-; ILP32F-NEXT:    fsw fs5, 24(sp)
-; ILP32F-NEXT:    fsw fs6, 20(sp)
-; ILP32F-NEXT:    fsw fs7, 16(sp)
-; ILP32F-NEXT:    fsw fs8, 12(sp)
-; ILP32F-NEXT:    fsw fs9, 8(sp)
-; ILP32F-NEXT:    fsw fs10, 4(sp)
-; ILP32F-NEXT:    fsw fs11, 0(sp)
+; ILP32F-NEXT:    fsw fs0, 44(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs1, 40(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs2, 36(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs3, 32(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs4, 28(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs5, 24(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs6, 20(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs7, 16(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs9, 8(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs10, 4(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs11, 0(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    lui a0, %hi(var)
 ; ILP32F-NEXT:    flw ft0, %lo(var)(a0)
 ; ILP32F-NEXT:    flw ft1, %lo(var+4)(a0)
@@ -242,36 +242,36 @@ define void @callee() nounwind {
 ; ILP32F-NEXT:    fsw ft2, %lo(var+8)(a0)
 ; ILP32F-NEXT:    fsw ft1, %lo(var+4)(a0)
 ; ILP32F-NEXT:    fsw ft0, %lo(var)(a0)
-; ILP32F-NEXT:    flw fs11, 0(sp)
-; ILP32F-NEXT:    flw fs10, 4(sp)
-; ILP32F-NEXT:    flw fs9, 8(sp)
-; ILP32F-NEXT:    flw fs8, 12(sp)
-; ILP32F-NEXT:    flw fs7, 16(sp)
-; ILP32F-NEXT:    flw fs6, 20(sp)
-; ILP32F-NEXT:    flw fs5, 24(sp)
-; ILP32F-NEXT:    flw fs4, 28(sp)
-; ILP32F-NEXT:    flw fs3, 32(sp)
-; ILP32F-NEXT:    flw fs2, 36(sp)
-; ILP32F-NEXT:    flw fs1, 40(sp)
-; ILP32F-NEXT:    flw fs0, 44(sp)
+; ILP32F-NEXT:    flw fs11, 0(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs10, 4(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs9, 8(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs7, 16(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs6, 20(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs5, 24(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs4, 28(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs3, 32(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs2, 36(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs1, 40(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs0, 44(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    addi sp, sp, 48
 ; ILP32F-NEXT:    ret
 ;
 ; LP64F-LABEL: callee:
 ; LP64F:       # %bb.0:
 ; LP64F-NEXT:    addi sp, sp, -48
-; LP64F-NEXT:    fsw fs0, 44(sp)
-; LP64F-NEXT:    fsw fs1, 40(sp)
-; LP64F-NEXT:    fsw fs2, 36(sp)
-; LP64F-NEXT:    fsw fs3, 32(sp)
-; LP64F-NEXT:    fsw fs4, 28(sp)
-; LP64F-NEXT:    fsw fs5, 24(sp)
-; LP64F-NEXT:    fsw fs6, 20(sp)
-; LP64F-NEXT:    fsw fs7, 16(sp)
-; LP64F-NEXT:    fsw fs8, 12(sp)
-; LP64F-NEXT:    fsw fs9, 8(sp)
-; LP64F-NEXT:    fsw fs10, 4(sp)
-; LP64F-NEXT:    fsw fs11, 0(sp)
+; LP64F-NEXT:    fsw fs0, 44(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs1, 40(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs2, 36(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs3, 32(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs4, 28(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs5, 24(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs6, 20(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs7, 16(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs9, 8(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs10, 4(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs11, 0(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    lui a0, %hi(var)
 ; LP64F-NEXT:    flw ft0, %lo(var)(a0)
 ; LP64F-NEXT:    flw ft1, %lo(var+4)(a0)
@@ -338,36 +338,36 @@ define void @callee() nounwind {
 ; LP64F-NEXT:    fsw ft2, %lo(var+8)(a0)
 ; LP64F-NEXT:    fsw ft1, %lo(var+4)(a0)
 ; LP64F-NEXT:    fsw ft0, %lo(var)(a0)
-; LP64F-NEXT:    flw fs11, 0(sp)
-; LP64F-NEXT:    flw fs10, 4(sp)
-; LP64F-NEXT:    flw fs9, 8(sp)
-; LP64F-NEXT:    flw fs8, 12(sp)
-; LP64F-NEXT:    flw fs7, 16(sp)
-; LP64F-NEXT:    flw fs6, 20(sp)
-; LP64F-NEXT:    flw fs5, 24(sp)
-; LP64F-NEXT:    flw fs4, 28(sp)
-; LP64F-NEXT:    flw fs3, 32(sp)
-; LP64F-NEXT:    flw fs2, 36(sp)
-; LP64F-NEXT:    flw fs1, 40(sp)
-; LP64F-NEXT:    flw fs0, 44(sp)
+; LP64F-NEXT:    flw fs11, 0(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs10, 4(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs9, 8(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs7, 16(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs6, 20(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs5, 24(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs4, 28(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs3, 32(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs2, 36(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs1, 40(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs0, 44(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    addi sp, sp, 48
 ; LP64F-NEXT:    ret
 ;
 ; ILP32D-LABEL: callee:
 ; ILP32D:       # %bb.0:
 ; ILP32D-NEXT:    addi sp, sp, -96
-; ILP32D-NEXT:    fsd fs0, 88(sp)
-; ILP32D-NEXT:    fsd fs1, 80(sp)
-; ILP32D-NEXT:    fsd fs2, 72(sp)
-; ILP32D-NEXT:    fsd fs3, 64(sp)
-; ILP32D-NEXT:    fsd fs4, 56(sp)
-; ILP32D-NEXT:    fsd fs5, 48(sp)
-; ILP32D-NEXT:    fsd fs6, 40(sp)
-; ILP32D-NEXT:    fsd fs7, 32(sp)
-; ILP32D-NEXT:    fsd fs8, 24(sp)
-; ILP32D-NEXT:    fsd fs9, 16(sp)
-; ILP32D-NEXT:    fsd fs10, 8(sp)
-; ILP32D-NEXT:    fsd fs11, 0(sp)
+; ILP32D-NEXT:    fsd fs0, 88(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs1, 80(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs2, 72(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs3, 64(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs4, 56(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs5, 48(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs6, 40(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs7, 32(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs8, 24(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs9, 16(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui a0, %hi(var)
 ; ILP32D-NEXT:    flw ft0, %lo(var)(a0)
 ; ILP32D-NEXT:    flw ft1, %lo(var+4)(a0)
@@ -434,36 +434,36 @@ define void @callee() nounwind {
 ; ILP32D-NEXT:    fsw ft2, %lo(var+8)(a0)
 ; ILP32D-NEXT:    fsw ft1, %lo(var+4)(a0)
 ; ILP32D-NEXT:    fsw ft0, %lo(var)(a0)
-; ILP32D-NEXT:    fld fs11, 0(sp)
-; ILP32D-NEXT:    fld fs10, 8(sp)
-; ILP32D-NEXT:    fld fs9, 16(sp)
-; ILP32D-NEXT:    fld fs8, 24(sp)
-; ILP32D-NEXT:    fld fs7, 32(sp)
-; ILP32D-NEXT:    fld fs6, 40(sp)
-; ILP32D-NEXT:    fld fs5, 48(sp)
-; ILP32D-NEXT:    fld fs4, 56(sp)
-; ILP32D-NEXT:    fld fs3, 64(sp)
-; ILP32D-NEXT:    fld fs2, 72(sp)
-; ILP32D-NEXT:    fld fs1, 80(sp)
-; ILP32D-NEXT:    fld fs0, 88(sp)
+; ILP32D-NEXT:    fld fs11, 0(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs9, 16(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs8, 24(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs7, 32(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs6, 40(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs5, 48(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs4, 56(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs3, 64(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    addi sp, sp, 96
 ; ILP32D-NEXT:    ret
 ;
 ; LP64D-LABEL: callee:
 ; LP64D:       # %bb.0:
 ; LP64D-NEXT:    addi sp, sp, -96
-; LP64D-NEXT:    fsd fs0, 88(sp)
-; LP64D-NEXT:    fsd fs1, 80(sp)
-; LP64D-NEXT:    fsd fs2, 72(sp)
-; LP64D-NEXT:    fsd fs3, 64(sp)
-; LP64D-NEXT:    fsd fs4, 56(sp)
-; LP64D-NEXT:    fsd fs5, 48(sp)
-; LP64D-NEXT:    fsd fs6, 40(sp)
-; LP64D-NEXT:    fsd fs7, 32(sp)
-; LP64D-NEXT:    fsd fs8, 24(sp)
-; LP64D-NEXT:    fsd fs9, 16(sp)
-; LP64D-NEXT:    fsd fs10, 8(sp)
-; LP64D-NEXT:    fsd fs11, 0(sp)
+; LP64D-NEXT:    fsd fs0, 88(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs1, 80(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs2, 72(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs3, 64(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs4, 56(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs5, 48(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs6, 40(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs7, 32(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs8, 24(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs9, 16(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui a0, %hi(var)
 ; LP64D-NEXT:    flw ft0, %lo(var)(a0)
 ; LP64D-NEXT:    flw ft1, %lo(var+4)(a0)
@@ -530,18 +530,18 @@ define void @callee() nounwind {
 ; LP64D-NEXT:    fsw ft2, %lo(var+8)(a0)
 ; LP64D-NEXT:    fsw ft1, %lo(var+4)(a0)
 ; LP64D-NEXT:    fsw ft0, %lo(var)(a0)
-; LP64D-NEXT:    fld fs11, 0(sp)
-; LP64D-NEXT:    fld fs10, 8(sp)
-; LP64D-NEXT:    fld fs9, 16(sp)
-; LP64D-NEXT:    fld fs8, 24(sp)
-; LP64D-NEXT:    fld fs7, 32(sp)
-; LP64D-NEXT:    fld fs6, 40(sp)
-; LP64D-NEXT:    fld fs5, 48(sp)
-; LP64D-NEXT:    fld fs4, 56(sp)
-; LP64D-NEXT:    fld fs3, 64(sp)
-; LP64D-NEXT:    fld fs2, 72(sp)
-; LP64D-NEXT:    fld fs1, 80(sp)
-; LP64D-NEXT:    fld fs0, 88(sp)
+; LP64D-NEXT:    fld fs11, 0(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs9, 16(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs8, 24(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs7, 32(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs6, 40(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs5, 48(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs4, 56(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs3, 64(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    addi sp, sp, 96
 ; LP64D-NEXT:    ret
   %val = load [32 x float], [32 x float]* @var
@@ -560,349 +560,349 @@ define void @caller() nounwind {
 ; ILP32-LABEL: caller:
 ; ILP32:       # %bb.0:
 ; ILP32-NEXT:    addi sp, sp, -144
-; ILP32-NEXT:    sw ra, 140(sp)
-; ILP32-NEXT:    sw s0, 136(sp)
-; ILP32-NEXT:    sw s1, 132(sp)
+; ILP32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    sw s1, 132(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    lui s0, %hi(var)
 ; ILP32-NEXT:    flw ft0, %lo(var)(s0)
-; ILP32-NEXT:    fsw ft0, 128(sp)
+; ILP32-NEXT:    fsw ft0, 128(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, %lo(var+4)(s0)
-; ILP32-NEXT:    fsw ft0, 124(sp)
+; ILP32-NEXT:    fsw ft0, 124(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    fsw ft0, 120(sp)
+; ILP32-NEXT:    fsw ft0, 120(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, %lo(var+12)(s0)
-; ILP32-NEXT:    fsw ft0, 116(sp)
+; ILP32-NEXT:    fsw ft0, 116(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    addi s1, s0, %lo(var)
 ; ILP32-NEXT:    flw ft0, 16(s1)
-; ILP32-NEXT:    fsw ft0, 112(sp)
+; ILP32-NEXT:    fsw ft0, 112(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 20(s1)
-; ILP32-NEXT:    fsw ft0, 108(sp)
+; ILP32-NEXT:    fsw ft0, 108(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 24(s1)
-; ILP32-NEXT:    fsw ft0, 104(sp)
+; ILP32-NEXT:    fsw ft0, 104(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 28(s1)
-; ILP32-NEXT:    fsw ft0, 100(sp)
+; ILP32-NEXT:    fsw ft0, 100(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 32(s1)
-; ILP32-NEXT:    fsw ft0, 96(sp)
+; ILP32-NEXT:    fsw ft0, 96(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 36(s1)
-; ILP32-NEXT:    fsw ft0, 92(sp)
+; ILP32-NEXT:    fsw ft0, 92(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 40(s1)
-; ILP32-NEXT:    fsw ft0, 88(sp)
+; ILP32-NEXT:    fsw ft0, 88(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 44(s1)
-; ILP32-NEXT:    fsw ft0, 84(sp)
+; ILP32-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 48(s1)
-; ILP32-NEXT:    fsw ft0, 80(sp)
+; ILP32-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 52(s1)
-; ILP32-NEXT:    fsw ft0, 76(sp)
+; ILP32-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 56(s1)
-; ILP32-NEXT:    fsw ft0, 72(sp)
+; ILP32-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 60(s1)
-; ILP32-NEXT:    fsw ft0, 68(sp)
+; ILP32-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 64(s1)
-; ILP32-NEXT:    fsw ft0, 64(sp)
+; ILP32-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 68(s1)
-; ILP32-NEXT:    fsw ft0, 60(sp)
+; ILP32-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 72(s1)
-; ILP32-NEXT:    fsw ft0, 56(sp)
+; ILP32-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 76(s1)
-; ILP32-NEXT:    fsw ft0, 52(sp)
+; ILP32-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 80(s1)
-; ILP32-NEXT:    fsw ft0, 48(sp)
+; ILP32-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 84(s1)
-; ILP32-NEXT:    fsw ft0, 44(sp)
+; ILP32-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 88(s1)
-; ILP32-NEXT:    fsw ft0, 40(sp)
+; ILP32-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 92(s1)
-; ILP32-NEXT:    fsw ft0, 36(sp)
+; ILP32-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 96(s1)
-; ILP32-NEXT:    fsw ft0, 32(sp)
+; ILP32-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 100(s1)
-; ILP32-NEXT:    fsw ft0, 28(sp)
+; ILP32-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 104(s1)
-; ILP32-NEXT:    fsw ft0, 24(sp)
+; ILP32-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 108(s1)
-; ILP32-NEXT:    fsw ft0, 20(sp)
+; ILP32-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 112(s1)
-; ILP32-NEXT:    fsw ft0, 16(sp)
+; ILP32-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 116(s1)
-; ILP32-NEXT:    fsw ft0, 12(sp)
+; ILP32-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 120(s1)
-; ILP32-NEXT:    fsw ft0, 8(sp)
+; ILP32-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    flw ft0, 124(s1)
-; ILP32-NEXT:    fsw ft0, 4(sp)
+; ILP32-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    call callee
-; ILP32-NEXT:    flw ft0, 4(sp)
+; ILP32-NEXT:    flw ft0, 4(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 124(s1)
-; ILP32-NEXT:    flw ft0, 8(sp)
+; ILP32-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 120(s1)
-; ILP32-NEXT:    flw ft0, 12(sp)
+; ILP32-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 116(s1)
-; ILP32-NEXT:    flw ft0, 16(sp)
+; ILP32-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 112(s1)
-; ILP32-NEXT:    flw ft0, 20(sp)
+; ILP32-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 108(s1)
-; ILP32-NEXT:    flw ft0, 24(sp)
+; ILP32-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 104(s1)
-; ILP32-NEXT:    flw ft0, 28(sp)
+; ILP32-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 100(s1)
-; ILP32-NEXT:    flw ft0, 32(sp)
+; ILP32-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 96(s1)
-; ILP32-NEXT:    flw ft0, 36(sp)
+; ILP32-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 92(s1)
-; ILP32-NEXT:    flw ft0, 40(sp)
+; ILP32-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 88(s1)
-; ILP32-NEXT:    flw ft0, 44(sp)
+; ILP32-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 84(s1)
-; ILP32-NEXT:    flw ft0, 48(sp)
+; ILP32-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 80(s1)
-; ILP32-NEXT:    flw ft0, 52(sp)
+; ILP32-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 76(s1)
-; ILP32-NEXT:    flw ft0, 56(sp)
+; ILP32-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 72(s1)
-; ILP32-NEXT:    flw ft0, 60(sp)
+; ILP32-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 68(s1)
-; ILP32-NEXT:    flw ft0, 64(sp)
+; ILP32-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 64(s1)
-; ILP32-NEXT:    flw ft0, 68(sp)
+; ILP32-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 60(s1)
-; ILP32-NEXT:    flw ft0, 72(sp)
+; ILP32-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 56(s1)
-; ILP32-NEXT:    flw ft0, 76(sp)
+; ILP32-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 52(s1)
-; ILP32-NEXT:    flw ft0, 80(sp)
+; ILP32-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 48(s1)
-; ILP32-NEXT:    flw ft0, 84(sp)
+; ILP32-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 44(s1)
-; ILP32-NEXT:    flw ft0, 88(sp)
+; ILP32-NEXT:    flw ft0, 88(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 40(s1)
-; ILP32-NEXT:    flw ft0, 92(sp)
+; ILP32-NEXT:    flw ft0, 92(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 36(s1)
-; ILP32-NEXT:    flw ft0, 96(sp)
+; ILP32-NEXT:    flw ft0, 96(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 32(s1)
-; ILP32-NEXT:    flw ft0, 100(sp)
+; ILP32-NEXT:    flw ft0, 100(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 28(s1)
-; ILP32-NEXT:    flw ft0, 104(sp)
+; ILP32-NEXT:    flw ft0, 104(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 24(s1)
-; ILP32-NEXT:    flw ft0, 108(sp)
+; ILP32-NEXT:    flw ft0, 108(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 20(s1)
-; ILP32-NEXT:    flw ft0, 112(sp)
+; ILP32-NEXT:    flw ft0, 112(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, 16(s1)
-; ILP32-NEXT:    flw ft0, 116(sp)
+; ILP32-NEXT:    flw ft0, 116(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, %lo(var+12)(s0)
-; ILP32-NEXT:    flw ft0, 120(sp)
+; ILP32-NEXT:    flw ft0, 120(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    flw ft0, 124(sp)
+; ILP32-NEXT:    flw ft0, 124(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, %lo(var+4)(s0)
-; ILP32-NEXT:    flw ft0, 128(sp)
+; ILP32-NEXT:    flw ft0, 128(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    fsw ft0, %lo(var)(s0)
-; ILP32-NEXT:    lw s1, 132(sp)
-; ILP32-NEXT:    lw s0, 136(sp)
-; ILP32-NEXT:    lw ra, 140(sp)
+; ILP32-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    addi sp, sp, 144
 ; ILP32-NEXT:    ret
 ;
 ; LP64-LABEL: caller:
 ; LP64:       # %bb.0:
 ; LP64-NEXT:    addi sp, sp, -160
-; LP64-NEXT:    sd ra, 152(sp)
-; LP64-NEXT:    sd s0, 144(sp)
-; LP64-NEXT:    sd s1, 136(sp)
+; LP64-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
+; LP64-NEXT:    sd s0, 144(sp) # 8-byte Folded Spill
+; LP64-NEXT:    sd s1, 136(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    lui s0, %hi(var)
 ; LP64-NEXT:    flw ft0, %lo(var)(s0)
-; LP64-NEXT:    fsw ft0, 132(sp)
+; LP64-NEXT:    fsw ft0, 132(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, %lo(var+4)(s0)
-; LP64-NEXT:    fsw ft0, 128(sp)
+; LP64-NEXT:    fsw ft0, 128(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, %lo(var+8)(s0)
-; LP64-NEXT:    fsw ft0, 124(sp)
+; LP64-NEXT:    fsw ft0, 124(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, %lo(var+12)(s0)
-; LP64-NEXT:    fsw ft0, 120(sp)
+; LP64-NEXT:    fsw ft0, 120(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    addi s1, s0, %lo(var)
 ; LP64-NEXT:    flw ft0, 16(s1)
-; LP64-NEXT:    fsw ft0, 116(sp)
+; LP64-NEXT:    fsw ft0, 116(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 20(s1)
-; LP64-NEXT:    fsw ft0, 112(sp)
+; LP64-NEXT:    fsw ft0, 112(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 24(s1)
-; LP64-NEXT:    fsw ft0, 108(sp)
+; LP64-NEXT:    fsw ft0, 108(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 28(s1)
-; LP64-NEXT:    fsw ft0, 104(sp)
+; LP64-NEXT:    fsw ft0, 104(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 32(s1)
-; LP64-NEXT:    fsw ft0, 100(sp)
+; LP64-NEXT:    fsw ft0, 100(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 36(s1)
-; LP64-NEXT:    fsw ft0, 96(sp)
+; LP64-NEXT:    fsw ft0, 96(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 40(s1)
-; LP64-NEXT:    fsw ft0, 92(sp)
+; LP64-NEXT:    fsw ft0, 92(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 44(s1)
-; LP64-NEXT:    fsw ft0, 88(sp)
+; LP64-NEXT:    fsw ft0, 88(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 48(s1)
-; LP64-NEXT:    fsw ft0, 84(sp)
+; LP64-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 52(s1)
-; LP64-NEXT:    fsw ft0, 80(sp)
+; LP64-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 56(s1)
-; LP64-NEXT:    fsw ft0, 76(sp)
+; LP64-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 60(s1)
-; LP64-NEXT:    fsw ft0, 72(sp)
+; LP64-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 64(s1)
-; LP64-NEXT:    fsw ft0, 68(sp)
+; LP64-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 68(s1)
-; LP64-NEXT:    fsw ft0, 64(sp)
+; LP64-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 72(s1)
-; LP64-NEXT:    fsw ft0, 60(sp)
+; LP64-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 76(s1)
-; LP64-NEXT:    fsw ft0, 56(sp)
+; LP64-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 80(s1)
-; LP64-NEXT:    fsw ft0, 52(sp)
+; LP64-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 84(s1)
-; LP64-NEXT:    fsw ft0, 48(sp)
+; LP64-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 88(s1)
-; LP64-NEXT:    fsw ft0, 44(sp)
+; LP64-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 92(s1)
-; LP64-NEXT:    fsw ft0, 40(sp)
+; LP64-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 96(s1)
-; LP64-NEXT:    fsw ft0, 36(sp)
+; LP64-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 100(s1)
-; LP64-NEXT:    fsw ft0, 32(sp)
+; LP64-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 104(s1)
-; LP64-NEXT:    fsw ft0, 28(sp)
+; LP64-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 108(s1)
-; LP64-NEXT:    fsw ft0, 24(sp)
+; LP64-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 112(s1)
-; LP64-NEXT:    fsw ft0, 20(sp)
+; LP64-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 116(s1)
-; LP64-NEXT:    fsw ft0, 16(sp)
+; LP64-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 120(s1)
-; LP64-NEXT:    fsw ft0, 12(sp)
+; LP64-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    flw ft0, 124(s1)
-; LP64-NEXT:    fsw ft0, 8(sp)
+; LP64-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    call callee
-; LP64-NEXT:    flw ft0, 8(sp)
+; LP64-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 124(s1)
-; LP64-NEXT:    flw ft0, 12(sp)
+; LP64-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 120(s1)
-; LP64-NEXT:    flw ft0, 16(sp)
+; LP64-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 116(s1)
-; LP64-NEXT:    flw ft0, 20(sp)
+; LP64-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 112(s1)
-; LP64-NEXT:    flw ft0, 24(sp)
+; LP64-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 108(s1)
-; LP64-NEXT:    flw ft0, 28(sp)
+; LP64-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 104(s1)
-; LP64-NEXT:    flw ft0, 32(sp)
+; LP64-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 100(s1)
-; LP64-NEXT:    flw ft0, 36(sp)
+; LP64-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 96(s1)
-; LP64-NEXT:    flw ft0, 40(sp)
+; LP64-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 92(s1)
-; LP64-NEXT:    flw ft0, 44(sp)
+; LP64-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 88(s1)
-; LP64-NEXT:    flw ft0, 48(sp)
+; LP64-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 84(s1)
-; LP64-NEXT:    flw ft0, 52(sp)
+; LP64-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 80(s1)
-; LP64-NEXT:    flw ft0, 56(sp)
+; LP64-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 76(s1)
-; LP64-NEXT:    flw ft0, 60(sp)
+; LP64-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 72(s1)
-; LP64-NEXT:    flw ft0, 64(sp)
+; LP64-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 68(s1)
-; LP64-NEXT:    flw ft0, 68(sp)
+; LP64-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 64(s1)
-; LP64-NEXT:    flw ft0, 72(sp)
+; LP64-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 60(s1)
-; LP64-NEXT:    flw ft0, 76(sp)
+; LP64-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 56(s1)
-; LP64-NEXT:    flw ft0, 80(sp)
+; LP64-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 52(s1)
-; LP64-NEXT:    flw ft0, 84(sp)
+; LP64-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 48(s1)
-; LP64-NEXT:    flw ft0, 88(sp)
+; LP64-NEXT:    flw ft0, 88(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 44(s1)
-; LP64-NEXT:    flw ft0, 92(sp)
+; LP64-NEXT:    flw ft0, 92(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 40(s1)
-; LP64-NEXT:    flw ft0, 96(sp)
+; LP64-NEXT:    flw ft0, 96(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 36(s1)
-; LP64-NEXT:    flw ft0, 100(sp)
+; LP64-NEXT:    flw ft0, 100(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 32(s1)
-; LP64-NEXT:    flw ft0, 104(sp)
+; LP64-NEXT:    flw ft0, 104(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 28(s1)
-; LP64-NEXT:    flw ft0, 108(sp)
+; LP64-NEXT:    flw ft0, 108(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 24(s1)
-; LP64-NEXT:    flw ft0, 112(sp)
+; LP64-NEXT:    flw ft0, 112(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 20(s1)
-; LP64-NEXT:    flw ft0, 116(sp)
+; LP64-NEXT:    flw ft0, 116(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, 16(s1)
-; LP64-NEXT:    flw ft0, 120(sp)
+; LP64-NEXT:    flw ft0, 120(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, %lo(var+12)(s0)
-; LP64-NEXT:    flw ft0, 124(sp)
+; LP64-NEXT:    flw ft0, 124(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, %lo(var+8)(s0)
-; LP64-NEXT:    flw ft0, 128(sp)
+; LP64-NEXT:    flw ft0, 128(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, %lo(var+4)(s0)
-; LP64-NEXT:    flw ft0, 132(sp)
+; LP64-NEXT:    flw ft0, 132(sp) # 4-byte Folded Reload
 ; LP64-NEXT:    fsw ft0, %lo(var)(s0)
-; LP64-NEXT:    ld s1, 136(sp)
-; LP64-NEXT:    ld s0, 144(sp)
-; LP64-NEXT:    ld ra, 152(sp)
+; LP64-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
+; LP64-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
+; LP64-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    addi sp, sp, 160
 ; LP64-NEXT:    ret
 ;
 ; ILP32F-LABEL: caller:
 ; ILP32F:       # %bb.0:
 ; ILP32F-NEXT:    addi sp, sp, -144
-; ILP32F-NEXT:    sw ra, 140(sp)
-; ILP32F-NEXT:    sw s0, 136(sp)
-; ILP32F-NEXT:    sw s1, 132(sp)
-; ILP32F-NEXT:    fsw fs0, 128(sp)
-; ILP32F-NEXT:    fsw fs1, 124(sp)
-; ILP32F-NEXT:    fsw fs2, 120(sp)
-; ILP32F-NEXT:    fsw fs3, 116(sp)
-; ILP32F-NEXT:    fsw fs4, 112(sp)
-; ILP32F-NEXT:    fsw fs5, 108(sp)
-; ILP32F-NEXT:    fsw fs6, 104(sp)
-; ILP32F-NEXT:    fsw fs7, 100(sp)
-; ILP32F-NEXT:    fsw fs8, 96(sp)
-; ILP32F-NEXT:    fsw fs9, 92(sp)
-; ILP32F-NEXT:    fsw fs10, 88(sp)
-; ILP32F-NEXT:    fsw fs11, 84(sp)
+; ILP32F-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    sw s1, 132(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs0, 128(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs1, 124(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs2, 120(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs3, 116(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs4, 112(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs5, 108(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs6, 104(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs7, 100(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs8, 96(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs9, 92(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs10, 88(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    fsw fs11, 84(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    lui s0, %hi(var)
 ; ILP32F-NEXT:    flw ft0, %lo(var)(s0)
-; ILP32F-NEXT:    fsw ft0, 80(sp)
+; ILP32F-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, %lo(var+4)(s0)
-; ILP32F-NEXT:    fsw ft0, 76(sp)
+; ILP32F-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, %lo(var+8)(s0)
-; ILP32F-NEXT:    fsw ft0, 72(sp)
+; ILP32F-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, %lo(var+12)(s0)
-; ILP32F-NEXT:    fsw ft0, 68(sp)
+; ILP32F-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    addi s1, s0, %lo(var)
 ; ILP32F-NEXT:    flw ft0, 16(s1)
-; ILP32F-NEXT:    fsw ft0, 64(sp)
+; ILP32F-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 20(s1)
-; ILP32F-NEXT:    fsw ft0, 60(sp)
+; ILP32F-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 24(s1)
-; ILP32F-NEXT:    fsw ft0, 56(sp)
+; ILP32F-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 28(s1)
-; ILP32F-NEXT:    fsw ft0, 52(sp)
+; ILP32F-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 32(s1)
-; ILP32F-NEXT:    fsw ft0, 48(sp)
+; ILP32F-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 36(s1)
-; ILP32F-NEXT:    fsw ft0, 44(sp)
+; ILP32F-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 40(s1)
-; ILP32F-NEXT:    fsw ft0, 40(sp)
+; ILP32F-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 44(s1)
-; ILP32F-NEXT:    fsw ft0, 36(sp)
+; ILP32F-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 48(s1)
-; ILP32F-NEXT:    fsw ft0, 32(sp)
+; ILP32F-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 52(s1)
-; ILP32F-NEXT:    fsw ft0, 28(sp)
+; ILP32F-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 56(s1)
-; ILP32F-NEXT:    fsw ft0, 24(sp)
+; ILP32F-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 60(s1)
-; ILP32F-NEXT:    fsw ft0, 20(sp)
+; ILP32F-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 64(s1)
-; ILP32F-NEXT:    fsw ft0, 16(sp)
+; ILP32F-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 68(s1)
-; ILP32F-NEXT:    fsw ft0, 12(sp)
+; ILP32F-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 72(s1)
-; ILP32F-NEXT:    fsw ft0, 8(sp)
+; ILP32F-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw ft0, 76(s1)
-; ILP32F-NEXT:    fsw ft0, 4(sp)
+; ILP32F-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw fs8, 80(s1)
 ; ILP32F-NEXT:    flw fs9, 84(s1)
 ; ILP32F-NEXT:    flw fs10, 88(s1)
@@ -928,124 +928,124 @@ define void @caller() nounwind {
 ; ILP32F-NEXT:    fsw fs10, 88(s1)
 ; ILP32F-NEXT:    fsw fs9, 84(s1)
 ; ILP32F-NEXT:    fsw fs8, 80(s1)
-; ILP32F-NEXT:    flw ft0, 4(sp)
+; ILP32F-NEXT:    flw ft0, 4(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 76(s1)
-; ILP32F-NEXT:    flw ft0, 8(sp)
+; ILP32F-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 72(s1)
-; ILP32F-NEXT:    flw ft0, 12(sp)
+; ILP32F-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 68(s1)
-; ILP32F-NEXT:    flw ft0, 16(sp)
+; ILP32F-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 64(s1)
-; ILP32F-NEXT:    flw ft0, 20(sp)
+; ILP32F-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 60(s1)
-; ILP32F-NEXT:    flw ft0, 24(sp)
+; ILP32F-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 56(s1)
-; ILP32F-NEXT:    flw ft0, 28(sp)
+; ILP32F-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 52(s1)
-; ILP32F-NEXT:    flw ft0, 32(sp)
+; ILP32F-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 48(s1)
-; ILP32F-NEXT:    flw ft0, 36(sp)
+; ILP32F-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 44(s1)
-; ILP32F-NEXT:    flw ft0, 40(sp)
+; ILP32F-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 40(s1)
-; ILP32F-NEXT:    flw ft0, 44(sp)
+; ILP32F-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 36(s1)
-; ILP32F-NEXT:    flw ft0, 48(sp)
+; ILP32F-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 32(s1)
-; ILP32F-NEXT:    flw ft0, 52(sp)
+; ILP32F-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 28(s1)
-; ILP32F-NEXT:    flw ft0, 56(sp)
+; ILP32F-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 24(s1)
-; ILP32F-NEXT:    flw ft0, 60(sp)
+; ILP32F-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 20(s1)
-; ILP32F-NEXT:    flw ft0, 64(sp)
+; ILP32F-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, 16(s1)
-; ILP32F-NEXT:    flw ft0, 68(sp)
+; ILP32F-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, %lo(var+12)(s0)
-; ILP32F-NEXT:    flw ft0, 72(sp)
+; ILP32F-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, %lo(var+8)(s0)
-; ILP32F-NEXT:    flw ft0, 76(sp)
+; ILP32F-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, %lo(var+4)(s0)
-; ILP32F-NEXT:    flw ft0, 80(sp)
+; ILP32F-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    fsw ft0, %lo(var)(s0)
-; ILP32F-NEXT:    flw fs11, 84(sp)
-; ILP32F-NEXT:    flw fs10, 88(sp)
-; ILP32F-NEXT:    flw fs9, 92(sp)
-; ILP32F-NEXT:    flw fs8, 96(sp)
-; ILP32F-NEXT:    flw fs7, 100(sp)
-; ILP32F-NEXT:    flw fs6, 104(sp)
-; ILP32F-NEXT:    flw fs5, 108(sp)
-; ILP32F-NEXT:    flw fs4, 112(sp)
-; ILP32F-NEXT:    flw fs3, 116(sp)
-; ILP32F-NEXT:    flw fs2, 120(sp)
-; ILP32F-NEXT:    flw fs1, 124(sp)
-; ILP32F-NEXT:    flw fs0, 128(sp)
-; ILP32F-NEXT:    lw s1, 132(sp)
-; ILP32F-NEXT:    lw s0, 136(sp)
-; ILP32F-NEXT:    lw ra, 140(sp)
+; ILP32F-NEXT:    flw fs11, 84(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs10, 88(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs9, 92(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs8, 96(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs7, 100(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs6, 104(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs5, 108(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs4, 112(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs3, 116(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs2, 120(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs1, 124(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    flw fs0, 128(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    addi sp, sp, 144
 ; ILP32F-NEXT:    ret
 ;
 ; LP64F-LABEL: caller:
 ; LP64F:       # %bb.0:
 ; LP64F-NEXT:    addi sp, sp, -160
-; LP64F-NEXT:    sd ra, 152(sp)
-; LP64F-NEXT:    sd s0, 144(sp)
-; LP64F-NEXT:    sd s1, 136(sp)
-; LP64F-NEXT:    fsw fs0, 132(sp)
-; LP64F-NEXT:    fsw fs1, 128(sp)
-; LP64F-NEXT:    fsw fs2, 124(sp)
-; LP64F-NEXT:    fsw fs3, 120(sp)
-; LP64F-NEXT:    fsw fs4, 116(sp)
-; LP64F-NEXT:    fsw fs5, 112(sp)
-; LP64F-NEXT:    fsw fs6, 108(sp)
-; LP64F-NEXT:    fsw fs7, 104(sp)
-; LP64F-NEXT:    fsw fs8, 100(sp)
-; LP64F-NEXT:    fsw fs9, 96(sp)
-; LP64F-NEXT:    fsw fs10, 92(sp)
-; LP64F-NEXT:    fsw fs11, 88(sp)
+; LP64F-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
+; LP64F-NEXT:    sd s0, 144(sp) # 8-byte Folded Spill
+; LP64F-NEXT:    sd s1, 136(sp) # 8-byte Folded Spill
+; LP64F-NEXT:    fsw fs0, 132(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs1, 128(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs2, 124(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs3, 120(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs4, 116(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs5, 112(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs6, 108(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs7, 104(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs8, 100(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs9, 96(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs10, 92(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    fsw fs11, 88(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    lui s0, %hi(var)
 ; LP64F-NEXT:    flw ft0, %lo(var)(s0)
-; LP64F-NEXT:    fsw ft0, 84(sp)
+; LP64F-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, %lo(var+4)(s0)
-; LP64F-NEXT:    fsw ft0, 80(sp)
+; LP64F-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, %lo(var+8)(s0)
-; LP64F-NEXT:    fsw ft0, 76(sp)
+; LP64F-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, %lo(var+12)(s0)
-; LP64F-NEXT:    fsw ft0, 72(sp)
+; LP64F-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    addi s1, s0, %lo(var)
 ; LP64F-NEXT:    flw ft0, 16(s1)
-; LP64F-NEXT:    fsw ft0, 68(sp)
+; LP64F-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 20(s1)
-; LP64F-NEXT:    fsw ft0, 64(sp)
+; LP64F-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 24(s1)
-; LP64F-NEXT:    fsw ft0, 60(sp)
+; LP64F-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 28(s1)
-; LP64F-NEXT:    fsw ft0, 56(sp)
+; LP64F-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 32(s1)
-; LP64F-NEXT:    fsw ft0, 52(sp)
+; LP64F-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 36(s1)
-; LP64F-NEXT:    fsw ft0, 48(sp)
+; LP64F-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 40(s1)
-; LP64F-NEXT:    fsw ft0, 44(sp)
+; LP64F-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 44(s1)
-; LP64F-NEXT:    fsw ft0, 40(sp)
+; LP64F-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 48(s1)
-; LP64F-NEXT:    fsw ft0, 36(sp)
+; LP64F-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 52(s1)
-; LP64F-NEXT:    fsw ft0, 32(sp)
+; LP64F-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 56(s1)
-; LP64F-NEXT:    fsw ft0, 28(sp)
+; LP64F-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 60(s1)
-; LP64F-NEXT:    fsw ft0, 24(sp)
+; LP64F-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 64(s1)
-; LP64F-NEXT:    fsw ft0, 20(sp)
+; LP64F-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 68(s1)
-; LP64F-NEXT:    fsw ft0, 16(sp)
+; LP64F-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 72(s1)
-; LP64F-NEXT:    fsw ft0, 12(sp)
+; LP64F-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw ft0, 76(s1)
-; LP64F-NEXT:    fsw ft0, 8(sp)
+; LP64F-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw fs8, 80(s1)
 ; LP64F-NEXT:    flw fs9, 84(s1)
 ; LP64F-NEXT:    flw fs10, 88(s1)
@@ -1071,124 +1071,124 @@ define void @caller() nounwind {
 ; LP64F-NEXT:    fsw fs10, 88(s1)
 ; LP64F-NEXT:    fsw fs9, 84(s1)
 ; LP64F-NEXT:    fsw fs8, 80(s1)
-; LP64F-NEXT:    flw ft0, 8(sp)
+; LP64F-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 76(s1)
-; LP64F-NEXT:    flw ft0, 12(sp)
+; LP64F-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 72(s1)
-; LP64F-NEXT:    flw ft0, 16(sp)
+; LP64F-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 68(s1)
-; LP64F-NEXT:    flw ft0, 20(sp)
+; LP64F-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 64(s1)
-; LP64F-NEXT:    flw ft0, 24(sp)
+; LP64F-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 60(s1)
-; LP64F-NEXT:    flw ft0, 28(sp)
+; LP64F-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 56(s1)
-; LP64F-NEXT:    flw ft0, 32(sp)
+; LP64F-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 52(s1)
-; LP64F-NEXT:    flw ft0, 36(sp)
+; LP64F-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 48(s1)
-; LP64F-NEXT:    flw ft0, 40(sp)
+; LP64F-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 44(s1)
-; LP64F-NEXT:    flw ft0, 44(sp)
+; LP64F-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 40(s1)
-; LP64F-NEXT:    flw ft0, 48(sp)
+; LP64F-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 36(s1)
-; LP64F-NEXT:    flw ft0, 52(sp)
+; LP64F-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 32(s1)
-; LP64F-NEXT:    flw ft0, 56(sp)
+; LP64F-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 28(s1)
-; LP64F-NEXT:    flw ft0, 60(sp)
+; LP64F-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 24(s1)
-; LP64F-NEXT:    flw ft0, 64(sp)
+; LP64F-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 20(s1)
-; LP64F-NEXT:    flw ft0, 68(sp)
+; LP64F-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, 16(s1)
-; LP64F-NEXT:    flw ft0, 72(sp)
+; LP64F-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, %lo(var+12)(s0)
-; LP64F-NEXT:    flw ft0, 76(sp)
+; LP64F-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, %lo(var+8)(s0)
-; LP64F-NEXT:    flw ft0, 80(sp)
+; LP64F-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, %lo(var+4)(s0)
-; LP64F-NEXT:    flw ft0, 84(sp)
+; LP64F-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    fsw ft0, %lo(var)(s0)
-; LP64F-NEXT:    flw fs11, 88(sp)
-; LP64F-NEXT:    flw fs10, 92(sp)
-; LP64F-NEXT:    flw fs9, 96(sp)
-; LP64F-NEXT:    flw fs8, 100(sp)
-; LP64F-NEXT:    flw fs7, 104(sp)
-; LP64F-NEXT:    flw fs6, 108(sp)
-; LP64F-NEXT:    flw fs5, 112(sp)
-; LP64F-NEXT:    flw fs4, 116(sp)
-; LP64F-NEXT:    flw fs3, 120(sp)
-; LP64F-NEXT:    flw fs2, 124(sp)
-; LP64F-NEXT:    flw fs1, 128(sp)
-; LP64F-NEXT:    flw fs0, 132(sp)
-; LP64F-NEXT:    ld s1, 136(sp)
-; LP64F-NEXT:    ld s0, 144(sp)
-; LP64F-NEXT:    ld ra, 152(sp)
+; LP64F-NEXT:    flw fs11, 88(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs10, 92(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs9, 96(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs8, 100(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs7, 104(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs6, 108(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs5, 112(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs4, 116(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs3, 120(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs2, 124(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs1, 128(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    flw fs0, 132(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
+; LP64F-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
+; LP64F-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LP64F-NEXT:    addi sp, sp, 160
 ; LP64F-NEXT:    ret
 ;
 ; ILP32D-LABEL: caller:
 ; ILP32D:       # %bb.0:
 ; ILP32D-NEXT:    addi sp, sp, -192
-; ILP32D-NEXT:    sw ra, 188(sp)
-; ILP32D-NEXT:    sw s0, 184(sp)
-; ILP32D-NEXT:    sw s1, 180(sp)
-; ILP32D-NEXT:    fsd fs0, 168(sp)
-; ILP32D-NEXT:    fsd fs1, 160(sp)
-; ILP32D-NEXT:    fsd fs2, 152(sp)
-; ILP32D-NEXT:    fsd fs3, 144(sp)
-; ILP32D-NEXT:    fsd fs4, 136(sp)
-; ILP32D-NEXT:    fsd fs5, 128(sp)
-; ILP32D-NEXT:    fsd fs6, 120(sp)
-; ILP32D-NEXT:    fsd fs7, 112(sp)
-; ILP32D-NEXT:    fsd fs8, 104(sp)
-; ILP32D-NEXT:    fsd fs9, 96(sp)
-; ILP32D-NEXT:    fsd fs10, 88(sp)
-; ILP32D-NEXT:    fsd fs11, 80(sp)
+; ILP32D-NEXT:    sw ra, 188(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    sw s0, 184(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    sw s1, 180(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    fsd fs0, 168(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs1, 160(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs2, 152(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs3, 144(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs4, 136(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs5, 128(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs6, 120(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs7, 112(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs8, 104(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs9, 96(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs10, 88(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs11, 80(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui s0, %hi(var)
 ; ILP32D-NEXT:    flw ft0, %lo(var)(s0)
-; ILP32D-NEXT:    fsw ft0, 76(sp)
+; ILP32D-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, %lo(var+4)(s0)
-; ILP32D-NEXT:    fsw ft0, 72(sp)
+; ILP32D-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    fsw ft0, 68(sp)
+; ILP32D-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, %lo(var+12)(s0)
-; ILP32D-NEXT:    fsw ft0, 64(sp)
+; ILP32D-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    addi s1, s0, %lo(var)
 ; ILP32D-NEXT:    flw ft0, 16(s1)
-; ILP32D-NEXT:    fsw ft0, 60(sp)
+; ILP32D-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 20(s1)
-; ILP32D-NEXT:    fsw ft0, 56(sp)
+; ILP32D-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 24(s1)
-; ILP32D-NEXT:    fsw ft0, 52(sp)
+; ILP32D-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 28(s1)
-; ILP32D-NEXT:    fsw ft0, 48(sp)
+; ILP32D-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 32(s1)
-; ILP32D-NEXT:    fsw ft0, 44(sp)
+; ILP32D-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 36(s1)
-; ILP32D-NEXT:    fsw ft0, 40(sp)
+; ILP32D-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 40(s1)
-; ILP32D-NEXT:    fsw ft0, 36(sp)
+; ILP32D-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 44(s1)
-; ILP32D-NEXT:    fsw ft0, 32(sp)
+; ILP32D-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 48(s1)
-; ILP32D-NEXT:    fsw ft0, 28(sp)
+; ILP32D-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 52(s1)
-; ILP32D-NEXT:    fsw ft0, 24(sp)
+; ILP32D-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 56(s1)
-; ILP32D-NEXT:    fsw ft0, 20(sp)
+; ILP32D-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 60(s1)
-; ILP32D-NEXT:    fsw ft0, 16(sp)
+; ILP32D-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 64(s1)
-; ILP32D-NEXT:    fsw ft0, 12(sp)
+; ILP32D-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 68(s1)
-; ILP32D-NEXT:    fsw ft0, 8(sp)
+; ILP32D-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 72(s1)
-; ILP32D-NEXT:    fsw ft0, 4(sp)
+; ILP32D-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw ft0, 76(s1)
-; ILP32D-NEXT:    fsw ft0, 0(sp)
+; ILP32D-NEXT:    fsw ft0, 0(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw fs8, 80(s1)
 ; ILP32D-NEXT:    flw fs9, 84(s1)
 ; ILP32D-NEXT:    flw fs10, 88(s1)
@@ -1214,124 +1214,124 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    fsw fs10, 88(s1)
 ; ILP32D-NEXT:    fsw fs9, 84(s1)
 ; ILP32D-NEXT:    fsw fs8, 80(s1)
-; ILP32D-NEXT:    flw ft0, 0(sp)
+; ILP32D-NEXT:    flw ft0, 0(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 76(s1)
-; ILP32D-NEXT:    flw ft0, 4(sp)
+; ILP32D-NEXT:    flw ft0, 4(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 72(s1)
-; ILP32D-NEXT:    flw ft0, 8(sp)
+; ILP32D-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 68(s1)
-; ILP32D-NEXT:    flw ft0, 12(sp)
+; ILP32D-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 64(s1)
-; ILP32D-NEXT:    flw ft0, 16(sp)
+; ILP32D-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 60(s1)
-; ILP32D-NEXT:    flw ft0, 20(sp)
+; ILP32D-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 56(s1)
-; ILP32D-NEXT:    flw ft0, 24(sp)
+; ILP32D-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 52(s1)
-; ILP32D-NEXT:    flw ft0, 28(sp)
+; ILP32D-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 48(s1)
-; ILP32D-NEXT:    flw ft0, 32(sp)
+; ILP32D-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 44(s1)
-; ILP32D-NEXT:    flw ft0, 36(sp)
+; ILP32D-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 40(s1)
-; ILP32D-NEXT:    flw ft0, 40(sp)
+; ILP32D-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 36(s1)
-; ILP32D-NEXT:    flw ft0, 44(sp)
+; ILP32D-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 32(s1)
-; ILP32D-NEXT:    flw ft0, 48(sp)
+; ILP32D-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 28(s1)
-; ILP32D-NEXT:    flw ft0, 52(sp)
+; ILP32D-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 24(s1)
-; ILP32D-NEXT:    flw ft0, 56(sp)
+; ILP32D-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 20(s1)
-; ILP32D-NEXT:    flw ft0, 60(sp)
+; ILP32D-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, 16(s1)
-; ILP32D-NEXT:    flw ft0, 64(sp)
+; ILP32D-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, %lo(var+12)(s0)
-; ILP32D-NEXT:    flw ft0, 68(sp)
+; ILP32D-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    flw ft0, 72(sp)
+; ILP32D-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, %lo(var+4)(s0)
-; ILP32D-NEXT:    flw ft0, 76(sp)
+; ILP32D-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    fsw ft0, %lo(var)(s0)
-; ILP32D-NEXT:    fld fs11, 80(sp)
-; ILP32D-NEXT:    fld fs10, 88(sp)
-; ILP32D-NEXT:    fld fs9, 96(sp)
-; ILP32D-NEXT:    fld fs8, 104(sp)
-; ILP32D-NEXT:    fld fs7, 112(sp)
-; ILP32D-NEXT:    fld fs6, 120(sp)
-; ILP32D-NEXT:    fld fs5, 128(sp)
-; ILP32D-NEXT:    fld fs4, 136(sp)
-; ILP32D-NEXT:    fld fs3, 144(sp)
-; ILP32D-NEXT:    fld fs2, 152(sp)
-; ILP32D-NEXT:    fld fs1, 160(sp)
-; ILP32D-NEXT:    fld fs0, 168(sp)
-; ILP32D-NEXT:    lw s1, 180(sp)
-; ILP32D-NEXT:    lw s0, 184(sp)
-; ILP32D-NEXT:    lw ra, 188(sp)
+; ILP32D-NEXT:    fld fs11, 80(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs10, 88(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs9, 96(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs8, 104(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs7, 112(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs6, 120(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs5, 128(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs4, 136(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs3, 144(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs2, 152(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs1, 160(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs0, 168(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    lw s1, 180(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    lw s0, 184(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    addi sp, sp, 192
 ; ILP32D-NEXT:    ret
 ;
 ; LP64D-LABEL: caller:
 ; LP64D:       # %bb.0:
 ; LP64D-NEXT:    addi sp, sp, -208
-; LP64D-NEXT:    sd ra, 200(sp)
-; LP64D-NEXT:    sd s0, 192(sp)
-; LP64D-NEXT:    sd s1, 184(sp)
-; LP64D-NEXT:    fsd fs0, 176(sp)
-; LP64D-NEXT:    fsd fs1, 168(sp)
-; LP64D-NEXT:    fsd fs2, 160(sp)
-; LP64D-NEXT:    fsd fs3, 152(sp)
-; LP64D-NEXT:    fsd fs4, 144(sp)
-; LP64D-NEXT:    fsd fs5, 136(sp)
-; LP64D-NEXT:    fsd fs6, 128(sp)
-; LP64D-NEXT:    fsd fs7, 120(sp)
-; LP64D-NEXT:    fsd fs8, 112(sp)
-; LP64D-NEXT:    fsd fs9, 104(sp)
-; LP64D-NEXT:    fsd fs10, 96(sp)
-; LP64D-NEXT:    fsd fs11, 88(sp)
+; LP64D-NEXT:    sd ra, 200(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    sd s0, 192(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    sd s1, 184(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs0, 176(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs1, 168(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs2, 160(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs3, 152(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs4, 144(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs5, 136(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs6, 128(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs7, 120(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs8, 112(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs9, 104(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs10, 96(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs11, 88(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui s0, %hi(var)
 ; LP64D-NEXT:    flw ft0, %lo(var)(s0)
-; LP64D-NEXT:    fsw ft0, 84(sp)
+; LP64D-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, %lo(var+4)(s0)
-; LP64D-NEXT:    fsw ft0, 80(sp)
+; LP64D-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    fsw ft0, 76(sp)
+; LP64D-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, %lo(var+12)(s0)
-; LP64D-NEXT:    fsw ft0, 72(sp)
+; LP64D-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    addi s1, s0, %lo(var)
 ; LP64D-NEXT:    flw ft0, 16(s1)
-; LP64D-NEXT:    fsw ft0, 68(sp)
+; LP64D-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 20(s1)
-; LP64D-NEXT:    fsw ft0, 64(sp)
+; LP64D-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 24(s1)
-; LP64D-NEXT:    fsw ft0, 60(sp)
+; LP64D-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 28(s1)
-; LP64D-NEXT:    fsw ft0, 56(sp)
+; LP64D-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 32(s1)
-; LP64D-NEXT:    fsw ft0, 52(sp)
+; LP64D-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 36(s1)
-; LP64D-NEXT:    fsw ft0, 48(sp)
+; LP64D-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 40(s1)
-; LP64D-NEXT:    fsw ft0, 44(sp)
+; LP64D-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 44(s1)
-; LP64D-NEXT:    fsw ft0, 40(sp)
+; LP64D-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 48(s1)
-; LP64D-NEXT:    fsw ft0, 36(sp)
+; LP64D-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 52(s1)
-; LP64D-NEXT:    fsw ft0, 32(sp)
+; LP64D-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 56(s1)
-; LP64D-NEXT:    fsw ft0, 28(sp)
+; LP64D-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 60(s1)
-; LP64D-NEXT:    fsw ft0, 24(sp)
+; LP64D-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 64(s1)
-; LP64D-NEXT:    fsw ft0, 20(sp)
+; LP64D-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 68(s1)
-; LP64D-NEXT:    fsw ft0, 16(sp)
+; LP64D-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 72(s1)
-; LP64D-NEXT:    fsw ft0, 12(sp)
+; LP64D-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw ft0, 76(s1)
-; LP64D-NEXT:    fsw ft0, 8(sp)
+; LP64D-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw fs8, 80(s1)
 ; LP64D-NEXT:    flw fs9, 84(s1)
 ; LP64D-NEXT:    flw fs10, 88(s1)
@@ -1357,61 +1357,61 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    fsw fs10, 88(s1)
 ; LP64D-NEXT:    fsw fs9, 84(s1)
 ; LP64D-NEXT:    fsw fs8, 80(s1)
-; LP64D-NEXT:    flw ft0, 8(sp)
+; LP64D-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 76(s1)
-; LP64D-NEXT:    flw ft0, 12(sp)
+; LP64D-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 72(s1)
-; LP64D-NEXT:    flw ft0, 16(sp)
+; LP64D-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 68(s1)
-; LP64D-NEXT:    flw ft0, 20(sp)
+; LP64D-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 64(s1)
-; LP64D-NEXT:    flw ft0, 24(sp)
+; LP64D-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 60(s1)
-; LP64D-NEXT:    flw ft0, 28(sp)
+; LP64D-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 56(s1)
-; LP64D-NEXT:    flw ft0, 32(sp)
+; LP64D-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 52(s1)
-; LP64D-NEXT:    flw ft0, 36(sp)
+; LP64D-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 48(s1)
-; LP64D-NEXT:    flw ft0, 40(sp)
+; LP64D-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 44(s1)
-; LP64D-NEXT:    flw ft0, 44(sp)
+; LP64D-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 40(s1)
-; LP64D-NEXT:    flw ft0, 48(sp)
+; LP64D-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 36(s1)
-; LP64D-NEXT:    flw ft0, 52(sp)
+; LP64D-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 32(s1)
-; LP64D-NEXT:    flw ft0, 56(sp)
+; LP64D-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 28(s1)
-; LP64D-NEXT:    flw ft0, 60(sp)
+; LP64D-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 24(s1)
-; LP64D-NEXT:    flw ft0, 64(sp)
+; LP64D-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 20(s1)
-; LP64D-NEXT:    flw ft0, 68(sp)
+; LP64D-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, 16(s1)
-; LP64D-NEXT:    flw ft0, 72(sp)
+; LP64D-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, %lo(var+12)(s0)
-; LP64D-NEXT:    flw ft0, 76(sp)
+; LP64D-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    flw ft0, 80(sp)
+; LP64D-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, %lo(var+4)(s0)
-; LP64D-NEXT:    flw ft0, 84(sp)
+; LP64D-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
 ; LP64D-NEXT:    fsw ft0, %lo(var)(s0)
-; LP64D-NEXT:    fld fs11, 88(sp)
-; LP64D-NEXT:    fld fs10, 96(sp)
-; LP64D-NEXT:    fld fs9, 104(sp)
-; LP64D-NEXT:    fld fs8, 112(sp)
-; LP64D-NEXT:    fld fs7, 120(sp)
-; LP64D-NEXT:    fld fs6, 128(sp)
-; LP64D-NEXT:    fld fs5, 136(sp)
-; LP64D-NEXT:    fld fs4, 144(sp)
-; LP64D-NEXT:    fld fs3, 152(sp)
-; LP64D-NEXT:    fld fs2, 160(sp)
-; LP64D-NEXT:    fld fs1, 168(sp)
-; LP64D-NEXT:    fld fs0, 176(sp)
-; LP64D-NEXT:    ld s1, 184(sp)
-; LP64D-NEXT:    ld s0, 192(sp)
-; LP64D-NEXT:    ld ra, 200(sp)
+; LP64D-NEXT:    fld fs11, 88(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs10, 96(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs9, 104(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs8, 112(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs7, 120(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs6, 128(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs5, 136(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs4, 144(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs3, 152(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs2, 160(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs1, 168(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs0, 176(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    ld s1, 184(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    ld s0, 192(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    ld ra, 200(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    addi sp, sp, 208
 ; LP64D-NEXT:    ret
   %val = load [32 x float], [32 x float]* @var

diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
index d5c67fb46203..efd0455a821d 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
@@ -160,18 +160,18 @@ define void @callee() nounwind {
 ; ILP32D-LABEL: callee:
 ; ILP32D:       # %bb.0:
 ; ILP32D-NEXT:    addi sp, sp, -96
-; ILP32D-NEXT:    fsd fs0, 88(sp)
-; ILP32D-NEXT:    fsd fs1, 80(sp)
-; ILP32D-NEXT:    fsd fs2, 72(sp)
-; ILP32D-NEXT:    fsd fs3, 64(sp)
-; ILP32D-NEXT:    fsd fs4, 56(sp)
-; ILP32D-NEXT:    fsd fs5, 48(sp)
-; ILP32D-NEXT:    fsd fs6, 40(sp)
-; ILP32D-NEXT:    fsd fs7, 32(sp)
-; ILP32D-NEXT:    fsd fs8, 24(sp)
-; ILP32D-NEXT:    fsd fs9, 16(sp)
-; ILP32D-NEXT:    fsd fs10, 8(sp)
-; ILP32D-NEXT:    fsd fs11, 0(sp)
+; ILP32D-NEXT:    fsd fs0, 88(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs1, 80(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs2, 72(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs3, 64(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs4, 56(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs5, 48(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs6, 40(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs7, 32(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs8, 24(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs9, 16(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui a0, %hi(var)
 ; ILP32D-NEXT:    fld ft0, %lo(var)(a0)
 ; ILP32D-NEXT:    fld ft1, %lo(var+8)(a0)
@@ -238,36 +238,36 @@ define void @callee() nounwind {
 ; ILP32D-NEXT:    fsd ft2, 16(a1)
 ; ILP32D-NEXT:    fsd ft1, %lo(var+8)(a0)
 ; ILP32D-NEXT:    fsd ft0, %lo(var)(a0)
-; ILP32D-NEXT:    fld fs11, 0(sp)
-; ILP32D-NEXT:    fld fs10, 8(sp)
-; ILP32D-NEXT:    fld fs9, 16(sp)
-; ILP32D-NEXT:    fld fs8, 24(sp)
-; ILP32D-NEXT:    fld fs7, 32(sp)
-; ILP32D-NEXT:    fld fs6, 40(sp)
-; ILP32D-NEXT:    fld fs5, 48(sp)
-; ILP32D-NEXT:    fld fs4, 56(sp)
-; ILP32D-NEXT:    fld fs3, 64(sp)
-; ILP32D-NEXT:    fld fs2, 72(sp)
-; ILP32D-NEXT:    fld fs1, 80(sp)
-; ILP32D-NEXT:    fld fs0, 88(sp)
+; ILP32D-NEXT:    fld fs11, 0(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs9, 16(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs8, 24(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs7, 32(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs6, 40(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs5, 48(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs4, 56(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs3, 64(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    addi sp, sp, 96
 ; ILP32D-NEXT:    ret
 ;
 ; LP64D-LABEL: callee:
 ; LP64D:       # %bb.0:
 ; LP64D-NEXT:    addi sp, sp, -96
-; LP64D-NEXT:    fsd fs0, 88(sp)
-; LP64D-NEXT:    fsd fs1, 80(sp)
-; LP64D-NEXT:    fsd fs2, 72(sp)
-; LP64D-NEXT:    fsd fs3, 64(sp)
-; LP64D-NEXT:    fsd fs4, 56(sp)
-; LP64D-NEXT:    fsd fs5, 48(sp)
-; LP64D-NEXT:    fsd fs6, 40(sp)
-; LP64D-NEXT:    fsd fs7, 32(sp)
-; LP64D-NEXT:    fsd fs8, 24(sp)
-; LP64D-NEXT:    fsd fs9, 16(sp)
-; LP64D-NEXT:    fsd fs10, 8(sp)
-; LP64D-NEXT:    fsd fs11, 0(sp)
+; LP64D-NEXT:    fsd fs0, 88(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs1, 80(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs2, 72(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs3, 64(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs4, 56(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs5, 48(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs6, 40(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs7, 32(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs8, 24(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs9, 16(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui a0, %hi(var)
 ; LP64D-NEXT:    fld ft0, %lo(var)(a0)
 ; LP64D-NEXT:    fld ft1, %lo(var+8)(a0)
@@ -334,18 +334,18 @@ define void @callee() nounwind {
 ; LP64D-NEXT:    fsd ft2, 16(a1)
 ; LP64D-NEXT:    fsd ft1, %lo(var+8)(a0)
 ; LP64D-NEXT:    fsd ft0, %lo(var)(a0)
-; LP64D-NEXT:    fld fs11, 0(sp)
-; LP64D-NEXT:    fld fs10, 8(sp)
-; LP64D-NEXT:    fld fs9, 16(sp)
-; LP64D-NEXT:    fld fs8, 24(sp)
-; LP64D-NEXT:    fld fs7, 32(sp)
-; LP64D-NEXT:    fld fs6, 40(sp)
-; LP64D-NEXT:    fld fs5, 48(sp)
-; LP64D-NEXT:    fld fs4, 56(sp)
-; LP64D-NEXT:    fld fs3, 64(sp)
-; LP64D-NEXT:    fld fs2, 72(sp)
-; LP64D-NEXT:    fld fs1, 80(sp)
-; LP64D-NEXT:    fld fs0, 88(sp)
+; LP64D-NEXT:    fld fs11, 0(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs9, 16(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs8, 24(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs7, 32(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs6, 40(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs5, 48(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs4, 56(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs3, 64(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    addi sp, sp, 96
 ; LP64D-NEXT:    ret
   %val = load [32 x double], [32 x double]* @var
@@ -364,349 +364,349 @@ define void @caller() nounwind {
 ; ILP32-LABEL: caller:
 ; ILP32:       # %bb.0:
 ; ILP32-NEXT:    addi sp, sp, -272
-; ILP32-NEXT:    sw ra, 268(sp)
-; ILP32-NEXT:    sw s0, 264(sp)
-; ILP32-NEXT:    sw s1, 260(sp)
+; ILP32-NEXT:    sw ra, 268(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    sw s1, 260(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    lui s0, %hi(var)
 ; ILP32-NEXT:    fld ft0, %lo(var)(s0)
-; ILP32-NEXT:    fsd ft0, 248(sp)
+; ILP32-NEXT:    fsd ft0, 248(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    fsd ft0, 240(sp)
+; ILP32-NEXT:    fsd ft0, 240(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    addi s1, s0, %lo(var)
 ; ILP32-NEXT:    fld ft0, 16(s1)
-; ILP32-NEXT:    fsd ft0, 232(sp)
+; ILP32-NEXT:    fsd ft0, 232(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 24(s1)
-; ILP32-NEXT:    fsd ft0, 224(sp)
+; ILP32-NEXT:    fsd ft0, 224(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 32(s1)
-; ILP32-NEXT:    fsd ft0, 216(sp)
+; ILP32-NEXT:    fsd ft0, 216(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 40(s1)
-; ILP32-NEXT:    fsd ft0, 208(sp)
+; ILP32-NEXT:    fsd ft0, 208(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 48(s1)
-; ILP32-NEXT:    fsd ft0, 200(sp)
+; ILP32-NEXT:    fsd ft0, 200(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 56(s1)
-; ILP32-NEXT:    fsd ft0, 192(sp)
+; ILP32-NEXT:    fsd ft0, 192(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 64(s1)
-; ILP32-NEXT:    fsd ft0, 184(sp)
+; ILP32-NEXT:    fsd ft0, 184(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 72(s1)
-; ILP32-NEXT:    fsd ft0, 176(sp)
+; ILP32-NEXT:    fsd ft0, 176(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 80(s1)
-; ILP32-NEXT:    fsd ft0, 168(sp)
+; ILP32-NEXT:    fsd ft0, 168(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 88(s1)
-; ILP32-NEXT:    fsd ft0, 160(sp)
+; ILP32-NEXT:    fsd ft0, 160(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 96(s1)
-; ILP32-NEXT:    fsd ft0, 152(sp)
+; ILP32-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 104(s1)
-; ILP32-NEXT:    fsd ft0, 144(sp)
+; ILP32-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 112(s1)
-; ILP32-NEXT:    fsd ft0, 136(sp)
+; ILP32-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 120(s1)
-; ILP32-NEXT:    fsd ft0, 128(sp)
+; ILP32-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 128(s1)
-; ILP32-NEXT:    fsd ft0, 120(sp)
+; ILP32-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 136(s1)
-; ILP32-NEXT:    fsd ft0, 112(sp)
+; ILP32-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 144(s1)
-; ILP32-NEXT:    fsd ft0, 104(sp)
+; ILP32-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 152(s1)
-; ILP32-NEXT:    fsd ft0, 96(sp)
+; ILP32-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 160(s1)
-; ILP32-NEXT:    fsd ft0, 88(sp)
+; ILP32-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 168(s1)
-; ILP32-NEXT:    fsd ft0, 80(sp)
+; ILP32-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 176(s1)
-; ILP32-NEXT:    fsd ft0, 72(sp)
+; ILP32-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 184(s1)
-; ILP32-NEXT:    fsd ft0, 64(sp)
+; ILP32-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 192(s1)
-; ILP32-NEXT:    fsd ft0, 56(sp)
+; ILP32-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 200(s1)
-; ILP32-NEXT:    fsd ft0, 48(sp)
+; ILP32-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 208(s1)
-; ILP32-NEXT:    fsd ft0, 40(sp)
+; ILP32-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 216(s1)
-; ILP32-NEXT:    fsd ft0, 32(sp)
+; ILP32-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 224(s1)
-; ILP32-NEXT:    fsd ft0, 24(sp)
+; ILP32-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 232(s1)
-; ILP32-NEXT:    fsd ft0, 16(sp)
+; ILP32-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 240(s1)
-; ILP32-NEXT:    fsd ft0, 8(sp)
+; ILP32-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    fld ft0, 248(s1)
-; ILP32-NEXT:    fsd ft0, 0(sp)
+; ILP32-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    call callee
-; ILP32-NEXT:    fld ft0, 0(sp)
+; ILP32-NEXT:    fld ft0, 0(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 248(s1)
-; ILP32-NEXT:    fld ft0, 8(sp)
+; ILP32-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 240(s1)
-; ILP32-NEXT:    fld ft0, 16(sp)
+; ILP32-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 232(s1)
-; ILP32-NEXT:    fld ft0, 24(sp)
+; ILP32-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 224(s1)
-; ILP32-NEXT:    fld ft0, 32(sp)
+; ILP32-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 216(s1)
-; ILP32-NEXT:    fld ft0, 40(sp)
+; ILP32-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 208(s1)
-; ILP32-NEXT:    fld ft0, 48(sp)
+; ILP32-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 200(s1)
-; ILP32-NEXT:    fld ft0, 56(sp)
+; ILP32-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 192(s1)
-; ILP32-NEXT:    fld ft0, 64(sp)
+; ILP32-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 184(s1)
-; ILP32-NEXT:    fld ft0, 72(sp)
+; ILP32-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 176(s1)
-; ILP32-NEXT:    fld ft0, 80(sp)
+; ILP32-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 168(s1)
-; ILP32-NEXT:    fld ft0, 88(sp)
+; ILP32-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 160(s1)
-; ILP32-NEXT:    fld ft0, 96(sp)
+; ILP32-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 152(s1)
-; ILP32-NEXT:    fld ft0, 104(sp)
+; ILP32-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 144(s1)
-; ILP32-NEXT:    fld ft0, 112(sp)
+; ILP32-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 136(s1)
-; ILP32-NEXT:    fld ft0, 120(sp)
+; ILP32-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 128(s1)
-; ILP32-NEXT:    fld ft0, 128(sp)
+; ILP32-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 120(s1)
-; ILP32-NEXT:    fld ft0, 136(sp)
+; ILP32-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 112(s1)
-; ILP32-NEXT:    fld ft0, 144(sp)
+; ILP32-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 104(s1)
-; ILP32-NEXT:    fld ft0, 152(sp)
+; ILP32-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 96(s1)
-; ILP32-NEXT:    fld ft0, 160(sp)
+; ILP32-NEXT:    fld ft0, 160(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 88(s1)
-; ILP32-NEXT:    fld ft0, 168(sp)
+; ILP32-NEXT:    fld ft0, 168(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 80(s1)
-; ILP32-NEXT:    fld ft0, 176(sp)
+; ILP32-NEXT:    fld ft0, 176(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 72(s1)
-; ILP32-NEXT:    fld ft0, 184(sp)
+; ILP32-NEXT:    fld ft0, 184(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 64(s1)
-; ILP32-NEXT:    fld ft0, 192(sp)
+; ILP32-NEXT:    fld ft0, 192(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 56(s1)
-; ILP32-NEXT:    fld ft0, 200(sp)
+; ILP32-NEXT:    fld ft0, 200(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 48(s1)
-; ILP32-NEXT:    fld ft0, 208(sp)
+; ILP32-NEXT:    fld ft0, 208(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 40(s1)
-; ILP32-NEXT:    fld ft0, 216(sp)
+; ILP32-NEXT:    fld ft0, 216(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 32(s1)
-; ILP32-NEXT:    fld ft0, 224(sp)
+; ILP32-NEXT:    fld ft0, 224(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 24(s1)
-; ILP32-NEXT:    fld ft0, 232(sp)
+; ILP32-NEXT:    fld ft0, 232(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, 16(s1)
-; ILP32-NEXT:    fld ft0, 240(sp)
+; ILP32-NEXT:    fld ft0, 240(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    fld ft0, 248(sp)
+; ILP32-NEXT:    fld ft0, 248(sp) # 8-byte Folded Reload
 ; ILP32-NEXT:    fsd ft0, %lo(var)(s0)
-; ILP32-NEXT:    lw s1, 260(sp)
-; ILP32-NEXT:    lw s0, 264(sp)
-; ILP32-NEXT:    lw ra, 268(sp)
+; ILP32-NEXT:    lw s1, 260(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    addi sp, sp, 272
 ; ILP32-NEXT:    ret
 ;
 ; LP64-LABEL: caller:
 ; LP64:       # %bb.0:
 ; LP64-NEXT:    addi sp, sp, -288
-; LP64-NEXT:    sd ra, 280(sp)
-; LP64-NEXT:    sd s0, 272(sp)
-; LP64-NEXT:    sd s1, 264(sp)
+; LP64-NEXT:    sd ra, 280(sp) # 8-byte Folded Spill
+; LP64-NEXT:    sd s0, 272(sp) # 8-byte Folded Spill
+; LP64-NEXT:    sd s1, 264(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    lui s0, %hi(var)
 ; LP64-NEXT:    fld ft0, %lo(var)(s0)
-; LP64-NEXT:    fsd ft0, 256(sp)
+; LP64-NEXT:    fsd ft0, 256(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, %lo(var+8)(s0)
-; LP64-NEXT:    fsd ft0, 248(sp)
+; LP64-NEXT:    fsd ft0, 248(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    addi s1, s0, %lo(var)
 ; LP64-NEXT:    fld ft0, 16(s1)
-; LP64-NEXT:    fsd ft0, 240(sp)
+; LP64-NEXT:    fsd ft0, 240(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 24(s1)
-; LP64-NEXT:    fsd ft0, 232(sp)
+; LP64-NEXT:    fsd ft0, 232(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 32(s1)
-; LP64-NEXT:    fsd ft0, 224(sp)
+; LP64-NEXT:    fsd ft0, 224(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 40(s1)
-; LP64-NEXT:    fsd ft0, 216(sp)
+; LP64-NEXT:    fsd ft0, 216(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 48(s1)
-; LP64-NEXT:    fsd ft0, 208(sp)
+; LP64-NEXT:    fsd ft0, 208(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 56(s1)
-; LP64-NEXT:    fsd ft0, 200(sp)
+; LP64-NEXT:    fsd ft0, 200(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 64(s1)
-; LP64-NEXT:    fsd ft0, 192(sp)
+; LP64-NEXT:    fsd ft0, 192(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 72(s1)
-; LP64-NEXT:    fsd ft0, 184(sp)
+; LP64-NEXT:    fsd ft0, 184(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 80(s1)
-; LP64-NEXT:    fsd ft0, 176(sp)
+; LP64-NEXT:    fsd ft0, 176(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 88(s1)
-; LP64-NEXT:    fsd ft0, 168(sp)
+; LP64-NEXT:    fsd ft0, 168(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 96(s1)
-; LP64-NEXT:    fsd ft0, 160(sp)
+; LP64-NEXT:    fsd ft0, 160(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 104(s1)
-; LP64-NEXT:    fsd ft0, 152(sp)
+; LP64-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 112(s1)
-; LP64-NEXT:    fsd ft0, 144(sp)
+; LP64-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 120(s1)
-; LP64-NEXT:    fsd ft0, 136(sp)
+; LP64-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 128(s1)
-; LP64-NEXT:    fsd ft0, 128(sp)
+; LP64-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 136(s1)
-; LP64-NEXT:    fsd ft0, 120(sp)
+; LP64-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 144(s1)
-; LP64-NEXT:    fsd ft0, 112(sp)
+; LP64-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 152(s1)
-; LP64-NEXT:    fsd ft0, 104(sp)
+; LP64-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 160(s1)
-; LP64-NEXT:    fsd ft0, 96(sp)
+; LP64-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 168(s1)
-; LP64-NEXT:    fsd ft0, 88(sp)
+; LP64-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 176(s1)
-; LP64-NEXT:    fsd ft0, 80(sp)
+; LP64-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 184(s1)
-; LP64-NEXT:    fsd ft0, 72(sp)
+; LP64-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 192(s1)
-; LP64-NEXT:    fsd ft0, 64(sp)
+; LP64-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 200(s1)
-; LP64-NEXT:    fsd ft0, 56(sp)
+; LP64-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 208(s1)
-; LP64-NEXT:    fsd ft0, 48(sp)
+; LP64-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 216(s1)
-; LP64-NEXT:    fsd ft0, 40(sp)
+; LP64-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 224(s1)
-; LP64-NEXT:    fsd ft0, 32(sp)
+; LP64-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 232(s1)
-; LP64-NEXT:    fsd ft0, 24(sp)
+; LP64-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 240(s1)
-; LP64-NEXT:    fsd ft0, 16(sp)
+; LP64-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    fld ft0, 248(s1)
-; LP64-NEXT:    fsd ft0, 8(sp)
+; LP64-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    call callee
-; LP64-NEXT:    fld ft0, 8(sp)
+; LP64-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 248(s1)
-; LP64-NEXT:    fld ft0, 16(sp)
+; LP64-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 240(s1)
-; LP64-NEXT:    fld ft0, 24(sp)
+; LP64-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 232(s1)
-; LP64-NEXT:    fld ft0, 32(sp)
+; LP64-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 224(s1)
-; LP64-NEXT:    fld ft0, 40(sp)
+; LP64-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 216(s1)
-; LP64-NEXT:    fld ft0, 48(sp)
+; LP64-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 208(s1)
-; LP64-NEXT:    fld ft0, 56(sp)
+; LP64-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 200(s1)
-; LP64-NEXT:    fld ft0, 64(sp)
+; LP64-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 192(s1)
-; LP64-NEXT:    fld ft0, 72(sp)
+; LP64-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 184(s1)
-; LP64-NEXT:    fld ft0, 80(sp)
+; LP64-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 176(s1)
-; LP64-NEXT:    fld ft0, 88(sp)
+; LP64-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 168(s1)
-; LP64-NEXT:    fld ft0, 96(sp)
+; LP64-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 160(s1)
-; LP64-NEXT:    fld ft0, 104(sp)
+; LP64-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 152(s1)
-; LP64-NEXT:    fld ft0, 112(sp)
+; LP64-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 144(s1)
-; LP64-NEXT:    fld ft0, 120(sp)
+; LP64-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 136(s1)
-; LP64-NEXT:    fld ft0, 128(sp)
+; LP64-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 128(s1)
-; LP64-NEXT:    fld ft0, 136(sp)
+; LP64-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 120(s1)
-; LP64-NEXT:    fld ft0, 144(sp)
+; LP64-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 112(s1)
-; LP64-NEXT:    fld ft0, 152(sp)
+; LP64-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 104(s1)
-; LP64-NEXT:    fld ft0, 160(sp)
+; LP64-NEXT:    fld ft0, 160(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 96(s1)
-; LP64-NEXT:    fld ft0, 168(sp)
+; LP64-NEXT:    fld ft0, 168(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 88(s1)
-; LP64-NEXT:    fld ft0, 176(sp)
+; LP64-NEXT:    fld ft0, 176(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 80(s1)
-; LP64-NEXT:    fld ft0, 184(sp)
+; LP64-NEXT:    fld ft0, 184(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 72(s1)
-; LP64-NEXT:    fld ft0, 192(sp)
+; LP64-NEXT:    fld ft0, 192(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 64(s1)
-; LP64-NEXT:    fld ft0, 200(sp)
+; LP64-NEXT:    fld ft0, 200(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 56(s1)
-; LP64-NEXT:    fld ft0, 208(sp)
+; LP64-NEXT:    fld ft0, 208(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 48(s1)
-; LP64-NEXT:    fld ft0, 216(sp)
+; LP64-NEXT:    fld ft0, 216(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 40(s1)
-; LP64-NEXT:    fld ft0, 224(sp)
+; LP64-NEXT:    fld ft0, 224(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 32(s1)
-; LP64-NEXT:    fld ft0, 232(sp)
+; LP64-NEXT:    fld ft0, 232(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 24(s1)
-; LP64-NEXT:    fld ft0, 240(sp)
+; LP64-NEXT:    fld ft0, 240(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, 16(s1)
-; LP64-NEXT:    fld ft0, 248(sp)
+; LP64-NEXT:    fld ft0, 248(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, %lo(var+8)(s0)
-; LP64-NEXT:    fld ft0, 256(sp)
+; LP64-NEXT:    fld ft0, 256(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    fsd ft0, %lo(var)(s0)
-; LP64-NEXT:    ld s1, 264(sp)
-; LP64-NEXT:    ld s0, 272(sp)
-; LP64-NEXT:    ld ra, 280(sp)
+; LP64-NEXT:    ld s1, 264(sp) # 8-byte Folded Reload
+; LP64-NEXT:    ld s0, 272(sp) # 8-byte Folded Reload
+; LP64-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    addi sp, sp, 288
 ; LP64-NEXT:    ret
 ;
 ; ILP32D-LABEL: caller:
 ; ILP32D:       # %bb.0:
 ; ILP32D-NEXT:    addi sp, sp, -272
-; ILP32D-NEXT:    sw ra, 268(sp)
-; ILP32D-NEXT:    sw s0, 264(sp)
-; ILP32D-NEXT:    sw s1, 260(sp)
-; ILP32D-NEXT:    fsd fs0, 248(sp)
-; ILP32D-NEXT:    fsd fs1, 240(sp)
-; ILP32D-NEXT:    fsd fs2, 232(sp)
-; ILP32D-NEXT:    fsd fs3, 224(sp)
-; ILP32D-NEXT:    fsd fs4, 216(sp)
-; ILP32D-NEXT:    fsd fs5, 208(sp)
-; ILP32D-NEXT:    fsd fs6, 200(sp)
-; ILP32D-NEXT:    fsd fs7, 192(sp)
-; ILP32D-NEXT:    fsd fs8, 184(sp)
-; ILP32D-NEXT:    fsd fs9, 176(sp)
-; ILP32D-NEXT:    fsd fs10, 168(sp)
-; ILP32D-NEXT:    fsd fs11, 160(sp)
+; ILP32D-NEXT:    sw ra, 268(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    sw s1, 260(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    fsd fs0, 248(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs1, 240(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs2, 232(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs3, 224(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs4, 216(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs5, 208(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs6, 200(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs7, 192(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs8, 184(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs9, 176(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs10, 168(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fsd fs11, 160(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui s0, %hi(var)
 ; ILP32D-NEXT:    fld ft0, %lo(var)(s0)
-; ILP32D-NEXT:    fsd ft0, 152(sp)
+; ILP32D-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    fsd ft0, 144(sp)
+; ILP32D-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    addi s1, s0, %lo(var)
 ; ILP32D-NEXT:    fld ft0, 16(s1)
-; ILP32D-NEXT:    fsd ft0, 136(sp)
+; ILP32D-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 24(s1)
-; ILP32D-NEXT:    fsd ft0, 128(sp)
+; ILP32D-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 32(s1)
-; ILP32D-NEXT:    fsd ft0, 120(sp)
+; ILP32D-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 40(s1)
-; ILP32D-NEXT:    fsd ft0, 112(sp)
+; ILP32D-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 48(s1)
-; ILP32D-NEXT:    fsd ft0, 104(sp)
+; ILP32D-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 56(s1)
-; ILP32D-NEXT:    fsd ft0, 96(sp)
+; ILP32D-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 64(s1)
-; ILP32D-NEXT:    fsd ft0, 88(sp)
+; ILP32D-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 72(s1)
-; ILP32D-NEXT:    fsd ft0, 80(sp)
+; ILP32D-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 80(s1)
-; ILP32D-NEXT:    fsd ft0, 72(sp)
+; ILP32D-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 88(s1)
-; ILP32D-NEXT:    fsd ft0, 64(sp)
+; ILP32D-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 96(s1)
-; ILP32D-NEXT:    fsd ft0, 56(sp)
+; ILP32D-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 104(s1)
-; ILP32D-NEXT:    fsd ft0, 48(sp)
+; ILP32D-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 112(s1)
-; ILP32D-NEXT:    fsd ft0, 40(sp)
+; ILP32D-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 120(s1)
-; ILP32D-NEXT:    fsd ft0, 32(sp)
+; ILP32D-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 128(s1)
-; ILP32D-NEXT:    fsd ft0, 24(sp)
+; ILP32D-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 136(s1)
-; ILP32D-NEXT:    fsd ft0, 16(sp)
+; ILP32D-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 144(s1)
-; ILP32D-NEXT:    fsd ft0, 8(sp)
+; ILP32D-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld ft0, 152(s1)
-; ILP32D-NEXT:    fsd ft0, 0(sp)
+; ILP32D-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld fs8, 160(s1)
 ; ILP32D-NEXT:    fld fs9, 168(s1)
 ; ILP32D-NEXT:    fld fs10, 176(s1)
@@ -732,124 +732,124 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    fsd fs10, 176(s1)
 ; ILP32D-NEXT:    fsd fs9, 168(s1)
 ; ILP32D-NEXT:    fsd fs8, 160(s1)
-; ILP32D-NEXT:    fld ft0, 0(sp)
+; ILP32D-NEXT:    fld ft0, 0(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 152(s1)
-; ILP32D-NEXT:    fld ft0, 8(sp)
+; ILP32D-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 144(s1)
-; ILP32D-NEXT:    fld ft0, 16(sp)
+; ILP32D-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 136(s1)
-; ILP32D-NEXT:    fld ft0, 24(sp)
+; ILP32D-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 128(s1)
-; ILP32D-NEXT:    fld ft0, 32(sp)
+; ILP32D-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 120(s1)
-; ILP32D-NEXT:    fld ft0, 40(sp)
+; ILP32D-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 112(s1)
-; ILP32D-NEXT:    fld ft0, 48(sp)
+; ILP32D-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 104(s1)
-; ILP32D-NEXT:    fld ft0, 56(sp)
+; ILP32D-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 96(s1)
-; ILP32D-NEXT:    fld ft0, 64(sp)
+; ILP32D-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 88(s1)
-; ILP32D-NEXT:    fld ft0, 72(sp)
+; ILP32D-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 80(s1)
-; ILP32D-NEXT:    fld ft0, 80(sp)
+; ILP32D-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 72(s1)
-; ILP32D-NEXT:    fld ft0, 88(sp)
+; ILP32D-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 64(s1)
-; ILP32D-NEXT:    fld ft0, 96(sp)
+; ILP32D-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 56(s1)
-; ILP32D-NEXT:    fld ft0, 104(sp)
+; ILP32D-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 48(s1)
-; ILP32D-NEXT:    fld ft0, 112(sp)
+; ILP32D-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 40(s1)
-; ILP32D-NEXT:    fld ft0, 120(sp)
+; ILP32D-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 32(s1)
-; ILP32D-NEXT:    fld ft0, 128(sp)
+; ILP32D-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 24(s1)
-; ILP32D-NEXT:    fld ft0, 136(sp)
+; ILP32D-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, 16(s1)
-; ILP32D-NEXT:    fld ft0, 144(sp)
+; ILP32D-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    fld ft0, 152(sp)
+; ILP32D-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fsd ft0, %lo(var)(s0)
-; ILP32D-NEXT:    fld fs11, 160(sp)
-; ILP32D-NEXT:    fld fs10, 168(sp)
-; ILP32D-NEXT:    fld fs9, 176(sp)
-; ILP32D-NEXT:    fld fs8, 184(sp)
-; ILP32D-NEXT:    fld fs7, 192(sp)
-; ILP32D-NEXT:    fld fs6, 200(sp)
-; ILP32D-NEXT:    fld fs5, 208(sp)
-; ILP32D-NEXT:    fld fs4, 216(sp)
-; ILP32D-NEXT:    fld fs3, 224(sp)
-; ILP32D-NEXT:    fld fs2, 232(sp)
-; ILP32D-NEXT:    fld fs1, 240(sp)
-; ILP32D-NEXT:    fld fs0, 248(sp)
-; ILP32D-NEXT:    lw s1, 260(sp)
-; ILP32D-NEXT:    lw s0, 264(sp)
-; ILP32D-NEXT:    lw ra, 268(sp)
+; ILP32D-NEXT:    fld fs11, 160(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs10, 168(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs9, 176(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs8, 184(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs7, 192(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs6, 200(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs5, 208(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs4, 216(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs3, 224(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs2, 232(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs1, 240(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fld fs0, 248(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    lw s1, 260(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    addi sp, sp, 272
 ; ILP32D-NEXT:    ret
 ;
 ; LP64D-LABEL: caller:
 ; LP64D:       # %bb.0:
 ; LP64D-NEXT:    addi sp, sp, -288
-; LP64D-NEXT:    sd ra, 280(sp)
-; LP64D-NEXT:    sd s0, 272(sp)
-; LP64D-NEXT:    sd s1, 264(sp)
-; LP64D-NEXT:    fsd fs0, 256(sp)
-; LP64D-NEXT:    fsd fs1, 248(sp)
-; LP64D-NEXT:    fsd fs2, 240(sp)
-; LP64D-NEXT:    fsd fs3, 232(sp)
-; LP64D-NEXT:    fsd fs4, 224(sp)
-; LP64D-NEXT:    fsd fs5, 216(sp)
-; LP64D-NEXT:    fsd fs6, 208(sp)
-; LP64D-NEXT:    fsd fs7, 200(sp)
-; LP64D-NEXT:    fsd fs8, 192(sp)
-; LP64D-NEXT:    fsd fs9, 184(sp)
-; LP64D-NEXT:    fsd fs10, 176(sp)
-; LP64D-NEXT:    fsd fs11, 168(sp)
+; LP64D-NEXT:    sd ra, 280(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    sd s0, 272(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    sd s1, 264(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs0, 256(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs1, 248(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs2, 240(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs3, 232(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs4, 224(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs5, 216(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs6, 208(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs7, 200(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs8, 192(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs9, 184(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs10, 176(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fsd fs11, 168(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui s0, %hi(var)
 ; LP64D-NEXT:    fld ft0, %lo(var)(s0)
-; LP64D-NEXT:    fsd ft0, 160(sp)
+; LP64D-NEXT:    fsd ft0, 160(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    fsd ft0, 152(sp)
+; LP64D-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    addi s1, s0, %lo(var)
 ; LP64D-NEXT:    fld ft0, 16(s1)
-; LP64D-NEXT:    fsd ft0, 144(sp)
+; LP64D-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 24(s1)
-; LP64D-NEXT:    fsd ft0, 136(sp)
+; LP64D-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 32(s1)
-; LP64D-NEXT:    fsd ft0, 128(sp)
+; LP64D-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 40(s1)
-; LP64D-NEXT:    fsd ft0, 120(sp)
+; LP64D-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 48(s1)
-; LP64D-NEXT:    fsd ft0, 112(sp)
+; LP64D-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 56(s1)
-; LP64D-NEXT:    fsd ft0, 104(sp)
+; LP64D-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 64(s1)
-; LP64D-NEXT:    fsd ft0, 96(sp)
+; LP64D-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 72(s1)
-; LP64D-NEXT:    fsd ft0, 88(sp)
+; LP64D-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 80(s1)
-; LP64D-NEXT:    fsd ft0, 80(sp)
+; LP64D-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 88(s1)
-; LP64D-NEXT:    fsd ft0, 72(sp)
+; LP64D-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 96(s1)
-; LP64D-NEXT:    fsd ft0, 64(sp)
+; LP64D-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 104(s1)
-; LP64D-NEXT:    fsd ft0, 56(sp)
+; LP64D-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 112(s1)
-; LP64D-NEXT:    fsd ft0, 48(sp)
+; LP64D-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 120(s1)
-; LP64D-NEXT:    fsd ft0, 40(sp)
+; LP64D-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 128(s1)
-; LP64D-NEXT:    fsd ft0, 32(sp)
+; LP64D-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 136(s1)
-; LP64D-NEXT:    fsd ft0, 24(sp)
+; LP64D-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 144(s1)
-; LP64D-NEXT:    fsd ft0, 16(sp)
+; LP64D-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld ft0, 152(s1)
-; LP64D-NEXT:    fsd ft0, 8(sp)
+; LP64D-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld fs8, 160(s1)
 ; LP64D-NEXT:    fld fs9, 168(s1)
 ; LP64D-NEXT:    fld fs10, 176(s1)
@@ -875,61 +875,61 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    fsd fs10, 176(s1)
 ; LP64D-NEXT:    fsd fs9, 168(s1)
 ; LP64D-NEXT:    fsd fs8, 160(s1)
-; LP64D-NEXT:    fld ft0, 8(sp)
+; LP64D-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 152(s1)
-; LP64D-NEXT:    fld ft0, 16(sp)
+; LP64D-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 144(s1)
-; LP64D-NEXT:    fld ft0, 24(sp)
+; LP64D-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 136(s1)
-; LP64D-NEXT:    fld ft0, 32(sp)
+; LP64D-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 128(s1)
-; LP64D-NEXT:    fld ft0, 40(sp)
+; LP64D-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 120(s1)
-; LP64D-NEXT:    fld ft0, 48(sp)
+; LP64D-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 112(s1)
-; LP64D-NEXT:    fld ft0, 56(sp)
+; LP64D-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 104(s1)
-; LP64D-NEXT:    fld ft0, 64(sp)
+; LP64D-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 96(s1)
-; LP64D-NEXT:    fld ft0, 72(sp)
+; LP64D-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 88(s1)
-; LP64D-NEXT:    fld ft0, 80(sp)
+; LP64D-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 80(s1)
-; LP64D-NEXT:    fld ft0, 88(sp)
+; LP64D-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 72(s1)
-; LP64D-NEXT:    fld ft0, 96(sp)
+; LP64D-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 64(s1)
-; LP64D-NEXT:    fld ft0, 104(sp)
+; LP64D-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 56(s1)
-; LP64D-NEXT:    fld ft0, 112(sp)
+; LP64D-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 48(s1)
-; LP64D-NEXT:    fld ft0, 120(sp)
+; LP64D-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 40(s1)
-; LP64D-NEXT:    fld ft0, 128(sp)
+; LP64D-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 32(s1)
-; LP64D-NEXT:    fld ft0, 136(sp)
+; LP64D-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 24(s1)
-; LP64D-NEXT:    fld ft0, 144(sp)
+; LP64D-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, 16(s1)
-; LP64D-NEXT:    fld ft0, 152(sp)
+; LP64D-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    fld ft0, 160(sp)
+; LP64D-NEXT:    fld ft0, 160(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fsd ft0, %lo(var)(s0)
-; LP64D-NEXT:    fld fs11, 168(sp)
-; LP64D-NEXT:    fld fs10, 176(sp)
-; LP64D-NEXT:    fld fs9, 184(sp)
-; LP64D-NEXT:    fld fs8, 192(sp)
-; LP64D-NEXT:    fld fs7, 200(sp)
-; LP64D-NEXT:    fld fs6, 208(sp)
-; LP64D-NEXT:    fld fs5, 216(sp)
-; LP64D-NEXT:    fld fs4, 224(sp)
-; LP64D-NEXT:    fld fs3, 232(sp)
-; LP64D-NEXT:    fld fs2, 240(sp)
-; LP64D-NEXT:    fld fs1, 248(sp)
-; LP64D-NEXT:    fld fs0, 256(sp)
-; LP64D-NEXT:    ld s1, 264(sp)
-; LP64D-NEXT:    ld s0, 272(sp)
-; LP64D-NEXT:    ld ra, 280(sp)
+; LP64D-NEXT:    fld fs11, 168(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs10, 176(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs9, 184(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs8, 192(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs7, 200(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs6, 208(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs5, 216(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs4, 224(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs3, 232(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs2, 240(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs1, 248(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fld fs0, 256(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    ld s1, 264(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    ld s0, 272(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    addi sp, sp, 288
 ; LP64D-NEXT:    ret
   %val = load [32 x double], [32 x double]* @var

diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index 99c07a35226c..76c87ed1aa7d 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -29,33 +29,33 @@ define void @callee() nounwind {
 ; RV32I-LABEL: callee:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -80
-; RV32I-NEXT:    sw ra, 76(sp)
-; RV32I-NEXT:    sw s0, 72(sp)
-; RV32I-NEXT:    sw s1, 68(sp)
-; RV32I-NEXT:    sw s2, 64(sp)
-; RV32I-NEXT:    sw s3, 60(sp)
-; RV32I-NEXT:    sw s4, 56(sp)
-; RV32I-NEXT:    sw s5, 52(sp)
-; RV32I-NEXT:    sw s6, 48(sp)
-; RV32I-NEXT:    sw s7, 44(sp)
-; RV32I-NEXT:    sw s8, 40(sp)
-; RV32I-NEXT:    sw s9, 36(sp)
-; RV32I-NEXT:    sw s10, 32(sp)
-; RV32I-NEXT:    sw s11, 28(sp)
+; RV32I-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 68(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 64(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s7, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s8, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s9, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s10, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s11, 28(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a7, %hi(var)
 ; RV32I-NEXT:    lw a0, %lo(var)(a7)
-; RV32I-NEXT:    sw a0, 24(sp)
+; RV32I-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, %lo(var+4)(a7)
-; RV32I-NEXT:    sw a0, 20(sp)
+; RV32I-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, %lo(var+8)(a7)
-; RV32I-NEXT:    sw a0, 16(sp)
+; RV32I-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, %lo(var+12)(a7)
-; RV32I-NEXT:    sw a0, 12(sp)
+; RV32I-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a5, a7, %lo(var)
 ; RV32I-NEXT:    lw a0, 16(a5)
-; RV32I-NEXT:    sw a0, 8(sp)
+; RV32I-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 20(a5)
-; RV32I-NEXT:    sw a0, 4(sp)
+; RV32I-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw t4, 24(a5)
 ; RV32I-NEXT:    lw t5, 28(a5)
 ; RV32I-NEXT:    lw t6, 32(a5)
@@ -108,67 +108,67 @@ define void @callee() nounwind {
 ; RV32I-NEXT:    sw t6, 32(a5)
 ; RV32I-NEXT:    sw t5, 28(a5)
 ; RV32I-NEXT:    sw t4, 24(a5)
-; RV32I-NEXT:    lw a0, 4(sp)
+; RV32I-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 20(a5)
-; RV32I-NEXT:    lw a0, 8(sp)
+; RV32I-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 16(a5)
-; RV32I-NEXT:    lw a0, 12(sp)
+; RV32I-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var+12)(a7)
-; RV32I-NEXT:    lw a0, 16(sp)
+; RV32I-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var+8)(a7)
-; RV32I-NEXT:    lw a0, 20(sp)
+; RV32I-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var+4)(a7)
-; RV32I-NEXT:    lw a0, 24(sp)
+; RV32I-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var)(a7)
-; RV32I-NEXT:    lw s11, 28(sp)
-; RV32I-NEXT:    lw s10, 32(sp)
-; RV32I-NEXT:    lw s9, 36(sp)
-; RV32I-NEXT:    lw s8, 40(sp)
-; RV32I-NEXT:    lw s7, 44(sp)
-; RV32I-NEXT:    lw s6, 48(sp)
-; RV32I-NEXT:    lw s5, 52(sp)
-; RV32I-NEXT:    lw s4, 56(sp)
-; RV32I-NEXT:    lw s3, 60(sp)
-; RV32I-NEXT:    lw s2, 64(sp)
-; RV32I-NEXT:    lw s1, 68(sp)
-; RV32I-NEXT:    lw s0, 72(sp)
-; RV32I-NEXT:    lw ra, 76(sp)
+; RV32I-NEXT:    lw s11, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s10, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s9, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s8, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s7, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 64(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 80
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-WITH-FP-LABEL: callee:
 ; RV32I-WITH-FP:       # %bb.0:
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, -80
-; RV32I-WITH-FP-NEXT:    sw ra, 76(sp)
-; RV32I-WITH-FP-NEXT:    sw s0, 72(sp)
-; RV32I-WITH-FP-NEXT:    sw s1, 68(sp)
-; RV32I-WITH-FP-NEXT:    sw s2, 64(sp)
-; RV32I-WITH-FP-NEXT:    sw s3, 60(sp)
-; RV32I-WITH-FP-NEXT:    sw s4, 56(sp)
-; RV32I-WITH-FP-NEXT:    sw s5, 52(sp)
-; RV32I-WITH-FP-NEXT:    sw s6, 48(sp)
-; RV32I-WITH-FP-NEXT:    sw s7, 44(sp)
-; RV32I-WITH-FP-NEXT:    sw s8, 40(sp)
-; RV32I-WITH-FP-NEXT:    sw s9, 36(sp)
-; RV32I-WITH-FP-NEXT:    sw s10, 32(sp)
-; RV32I-WITH-FP-NEXT:    sw s11, 28(sp)
+; RV32I-WITH-FP-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s1, 68(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s2, 64(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s3, 60(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s4, 56(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s5, 52(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s6, 48(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s7, 44(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s8, 40(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s9, 36(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s10, 32(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s11, 28(sp) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    addi s0, sp, 80
 ; RV32I-WITH-FP-NEXT:    lui a7, %hi(var)
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var)(a7)
-; RV32I-WITH-FP-NEXT:    sw a0, -56(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -56(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var+4)(a7)
-; RV32I-WITH-FP-NEXT:    sw a0, -60(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -60(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var+8)(a7)
-; RV32I-WITH-FP-NEXT:    sw a0, -64(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -64(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var+12)(a7)
-; RV32I-WITH-FP-NEXT:    sw a0, -68(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -68(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    addi a5, a7, %lo(var)
 ; RV32I-WITH-FP-NEXT:    lw a0, 16(a5)
-; RV32I-WITH-FP-NEXT:    sw a0, -72(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -72(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 20(a5)
-; RV32I-WITH-FP-NEXT:    sw a0, -76(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -76(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 24(a5)
-; RV32I-WITH-FP-NEXT:    sw a0, -80(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -80(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw t5, 28(a5)
 ; RV32I-WITH-FP-NEXT:    lw t6, 32(a5)
 ; RV32I-WITH-FP-NEXT:    lw s2, 36(a5)
@@ -219,66 +219,66 @@ define void @callee() nounwind {
 ; RV32I-WITH-FP-NEXT:    sw s2, 36(a5)
 ; RV32I-WITH-FP-NEXT:    sw t6, 32(a5)
 ; RV32I-WITH-FP-NEXT:    sw t5, 28(a5)
-; RV32I-WITH-FP-NEXT:    lw a0, -80(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -80(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 24(a5)
-; RV32I-WITH-FP-NEXT:    lw a0, -76(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -76(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 20(a5)
-; RV32I-WITH-FP-NEXT:    lw a0, -72(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -72(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 16(a5)
-; RV32I-WITH-FP-NEXT:    lw a0, -68(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -68(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var+12)(a7)
-; RV32I-WITH-FP-NEXT:    lw a0, -64(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -64(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var+8)(a7)
-; RV32I-WITH-FP-NEXT:    lw a0, -60(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -60(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var+4)(a7)
-; RV32I-WITH-FP-NEXT:    lw a0, -56(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -56(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var)(a7)
-; RV32I-WITH-FP-NEXT:    lw s11, 28(sp)
-; RV32I-WITH-FP-NEXT:    lw s10, 32(sp)
-; RV32I-WITH-FP-NEXT:    lw s9, 36(sp)
-; RV32I-WITH-FP-NEXT:    lw s8, 40(sp)
-; RV32I-WITH-FP-NEXT:    lw s7, 44(sp)
-; RV32I-WITH-FP-NEXT:    lw s6, 48(sp)
-; RV32I-WITH-FP-NEXT:    lw s5, 52(sp)
-; RV32I-WITH-FP-NEXT:    lw s4, 56(sp)
-; RV32I-WITH-FP-NEXT:    lw s3, 60(sp)
-; RV32I-WITH-FP-NEXT:    lw s2, 64(sp)
-; RV32I-WITH-FP-NEXT:    lw s1, 68(sp)
-; RV32I-WITH-FP-NEXT:    lw s0, 72(sp)
-; RV32I-WITH-FP-NEXT:    lw ra, 76(sp)
+; RV32I-WITH-FP-NEXT:    lw s11, 28(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s10, 32(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s9, 36(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s8, 40(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s7, 44(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s6, 48(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s5, 52(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s4, 56(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s3, 60(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s2, 64(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s1, 68(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, 80
 ; RV32I-WITH-FP-NEXT:    ret
 ;
 ; RV64I-LABEL: callee:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -160
-; RV64I-NEXT:    sd ra, 152(sp)
-; RV64I-NEXT:    sd s0, 144(sp)
-; RV64I-NEXT:    sd s1, 136(sp)
-; RV64I-NEXT:    sd s2, 128(sp)
-; RV64I-NEXT:    sd s3, 120(sp)
-; RV64I-NEXT:    sd s4, 112(sp)
-; RV64I-NEXT:    sd s5, 104(sp)
-; RV64I-NEXT:    sd s6, 96(sp)
-; RV64I-NEXT:    sd s7, 88(sp)
-; RV64I-NEXT:    sd s8, 80(sp)
-; RV64I-NEXT:    sd s9, 72(sp)
-; RV64I-NEXT:    sd s10, 64(sp)
-; RV64I-NEXT:    sd s11, 56(sp)
+; RV64I-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 144(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s6, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s7, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s8, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s9, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s10, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s11, 56(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lui a7, %hi(var)
 ; RV64I-NEXT:    lw a0, %lo(var)(a7)
-; RV64I-NEXT:    sd a0, 48(sp)
+; RV64I-NEXT:    sd a0, 48(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, %lo(var+4)(a7)
-; RV64I-NEXT:    sd a0, 40(sp)
+; RV64I-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, %lo(var+8)(a7)
-; RV64I-NEXT:    sd a0, 32(sp)
+; RV64I-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, %lo(var+12)(a7)
-; RV64I-NEXT:    sd a0, 24(sp)
+; RV64I-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a5, a7, %lo(var)
 ; RV64I-NEXT:    lw a0, 16(a5)
-; RV64I-NEXT:    sd a0, 16(sp)
+; RV64I-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 20(a5)
-; RV64I-NEXT:    sd a0, 8(sp)
+; RV64I-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw t4, 24(a5)
 ; RV64I-NEXT:    lw t5, 28(a5)
 ; RV64I-NEXT:    lw t6, 32(a5)
@@ -331,67 +331,67 @@ define void @callee() nounwind {
 ; RV64I-NEXT:    sw t6, 32(a5)
 ; RV64I-NEXT:    sw t5, 28(a5)
 ; RV64I-NEXT:    sw t4, 24(a5)
-; RV64I-NEXT:    ld a0, 8(sp)
+; RV64I-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 20(a5)
-; RV64I-NEXT:    ld a0, 16(sp)
+; RV64I-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 16(a5)
-; RV64I-NEXT:    ld a0, 24(sp)
+; RV64I-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var+12)(a7)
-; RV64I-NEXT:    ld a0, 32(sp)
+; RV64I-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var+8)(a7)
-; RV64I-NEXT:    ld a0, 40(sp)
+; RV64I-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var+4)(a7)
-; RV64I-NEXT:    ld a0, 48(sp)
+; RV64I-NEXT:    ld a0, 48(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var)(a7)
-; RV64I-NEXT:    ld s11, 56(sp)
-; RV64I-NEXT:    ld s10, 64(sp)
-; RV64I-NEXT:    ld s9, 72(sp)
-; RV64I-NEXT:    ld s8, 80(sp)
-; RV64I-NEXT:    ld s7, 88(sp)
-; RV64I-NEXT:    ld s6, 96(sp)
-; RV64I-NEXT:    ld s5, 104(sp)
-; RV64I-NEXT:    ld s4, 112(sp)
-; RV64I-NEXT:    ld s3, 120(sp)
-; RV64I-NEXT:    ld s2, 128(sp)
-; RV64I-NEXT:    ld s1, 136(sp)
-; RV64I-NEXT:    ld s0, 144(sp)
-; RV64I-NEXT:    ld ra, 152(sp)
+; RV64I-NEXT:    ld s11, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s10, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s9, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s8, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s7, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s6, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 160
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-WITH-FP-LABEL: callee:
 ; RV64I-WITH-FP:       # %bb.0:
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, -160
-; RV64I-WITH-FP-NEXT:    sd ra, 152(sp)
-; RV64I-WITH-FP-NEXT:    sd s0, 144(sp)
-; RV64I-WITH-FP-NEXT:    sd s1, 136(sp)
-; RV64I-WITH-FP-NEXT:    sd s2, 128(sp)
-; RV64I-WITH-FP-NEXT:    sd s3, 120(sp)
-; RV64I-WITH-FP-NEXT:    sd s4, 112(sp)
-; RV64I-WITH-FP-NEXT:    sd s5, 104(sp)
-; RV64I-WITH-FP-NEXT:    sd s6, 96(sp)
-; RV64I-WITH-FP-NEXT:    sd s7, 88(sp)
-; RV64I-WITH-FP-NEXT:    sd s8, 80(sp)
-; RV64I-WITH-FP-NEXT:    sd s9, 72(sp)
-; RV64I-WITH-FP-NEXT:    sd s10, 64(sp)
-; RV64I-WITH-FP-NEXT:    sd s11, 56(sp)
+; RV64I-WITH-FP-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s0, 144(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s1, 136(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s2, 128(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s3, 120(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s4, 112(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s5, 104(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s6, 96(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s7, 88(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s8, 80(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s9, 72(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s10, 64(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s11, 56(sp) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    addi s0, sp, 160
 ; RV64I-WITH-FP-NEXT:    lui a7, %hi(var)
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var)(a7)
-; RV64I-WITH-FP-NEXT:    sd a0, -112(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -112(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var+4)(a7)
-; RV64I-WITH-FP-NEXT:    sd a0, -120(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -120(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var+8)(a7)
-; RV64I-WITH-FP-NEXT:    sd a0, -128(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -128(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var+12)(a7)
-; RV64I-WITH-FP-NEXT:    sd a0, -136(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -136(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    addi a5, a7, %lo(var)
 ; RV64I-WITH-FP-NEXT:    lw a0, 16(a5)
-; RV64I-WITH-FP-NEXT:    sd a0, -144(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -144(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 20(a5)
-; RV64I-WITH-FP-NEXT:    sd a0, -152(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -152(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 24(a5)
-; RV64I-WITH-FP-NEXT:    sd a0, -160(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -160(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw t5, 28(a5)
 ; RV64I-WITH-FP-NEXT:    lw t6, 32(a5)
 ; RV64I-WITH-FP-NEXT:    lw s2, 36(a5)
@@ -442,33 +442,33 @@ define void @callee() nounwind {
 ; RV64I-WITH-FP-NEXT:    sw s2, 36(a5)
 ; RV64I-WITH-FP-NEXT:    sw t6, 32(a5)
 ; RV64I-WITH-FP-NEXT:    sw t5, 28(a5)
-; RV64I-WITH-FP-NEXT:    ld a0, -160(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -160(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 24(a5)
-; RV64I-WITH-FP-NEXT:    ld a0, -152(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -152(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 20(a5)
-; RV64I-WITH-FP-NEXT:    ld a0, -144(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -144(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 16(a5)
-; RV64I-WITH-FP-NEXT:    ld a0, -136(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -136(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var+12)(a7)
-; RV64I-WITH-FP-NEXT:    ld a0, -128(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -128(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var+8)(a7)
-; RV64I-WITH-FP-NEXT:    ld a0, -120(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -120(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var+4)(a7)
-; RV64I-WITH-FP-NEXT:    ld a0, -112(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -112(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var)(a7)
-; RV64I-WITH-FP-NEXT:    ld s11, 56(sp)
-; RV64I-WITH-FP-NEXT:    ld s10, 64(sp)
-; RV64I-WITH-FP-NEXT:    ld s9, 72(sp)
-; RV64I-WITH-FP-NEXT:    ld s8, 80(sp)
-; RV64I-WITH-FP-NEXT:    ld s7, 88(sp)
-; RV64I-WITH-FP-NEXT:    ld s6, 96(sp)
-; RV64I-WITH-FP-NEXT:    ld s5, 104(sp)
-; RV64I-WITH-FP-NEXT:    ld s4, 112(sp)
-; RV64I-WITH-FP-NEXT:    ld s3, 120(sp)
-; RV64I-WITH-FP-NEXT:    ld s2, 128(sp)
-; RV64I-WITH-FP-NEXT:    ld s1, 136(sp)
-; RV64I-WITH-FP-NEXT:    ld s0, 144(sp)
-; RV64I-WITH-FP-NEXT:    ld ra, 152(sp)
+; RV64I-WITH-FP-NEXT:    ld s11, 56(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s10, 64(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s9, 72(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s8, 80(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s7, 88(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s6, 96(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s5, 104(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s4, 112(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s3, 120(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s2, 128(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, 160
 ; RV64I-WITH-FP-NEXT:    ret
   %val = load [32 x i32], [32 x i32]* @var
@@ -483,65 +483,65 @@ define void @caller() nounwind {
 ; RV32I-LABEL: caller:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -144
-; RV32I-NEXT:    sw ra, 140(sp)
-; RV32I-NEXT:    sw s0, 136(sp)
-; RV32I-NEXT:    sw s1, 132(sp)
-; RV32I-NEXT:    sw s2, 128(sp)
-; RV32I-NEXT:    sw s3, 124(sp)
-; RV32I-NEXT:    sw s4, 120(sp)
-; RV32I-NEXT:    sw s5, 116(sp)
-; RV32I-NEXT:    sw s6, 112(sp)
-; RV32I-NEXT:    sw s7, 108(sp)
-; RV32I-NEXT:    sw s8, 104(sp)
-; RV32I-NEXT:    sw s9, 100(sp)
-; RV32I-NEXT:    sw s10, 96(sp)
-; RV32I-NEXT:    sw s11, 92(sp)
+; RV32I-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 132(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 128(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 124(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 120(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 116(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 112(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s7, 108(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s8, 104(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s9, 100(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s10, 96(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s11, 92(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui s0, %hi(var)
 ; RV32I-NEXT:    lw a0, %lo(var)(s0)
-; RV32I-NEXT:    sw a0, 88(sp)
+; RV32I-NEXT:    sw a0, 88(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, %lo(var+4)(s0)
-; RV32I-NEXT:    sw a0, 84(sp)
+; RV32I-NEXT:    sw a0, 84(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, %lo(var+8)(s0)
-; RV32I-NEXT:    sw a0, 80(sp)
+; RV32I-NEXT:    sw a0, 80(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, %lo(var+12)(s0)
-; RV32I-NEXT:    sw a0, 76(sp)
+; RV32I-NEXT:    sw a0, 76(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s1, s0, %lo(var)
 ; RV32I-NEXT:    lw a0, 16(s1)
-; RV32I-NEXT:    sw a0, 72(sp)
+; RV32I-NEXT:    sw a0, 72(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 20(s1)
-; RV32I-NEXT:    sw a0, 68(sp)
+; RV32I-NEXT:    sw a0, 68(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 24(s1)
-; RV32I-NEXT:    sw a0, 64(sp)
+; RV32I-NEXT:    sw a0, 64(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 28(s1)
-; RV32I-NEXT:    sw a0, 60(sp)
+; RV32I-NEXT:    sw a0, 60(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 32(s1)
-; RV32I-NEXT:    sw a0, 56(sp)
+; RV32I-NEXT:    sw a0, 56(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 36(s1)
-; RV32I-NEXT:    sw a0, 52(sp)
+; RV32I-NEXT:    sw a0, 52(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 40(s1)
-; RV32I-NEXT:    sw a0, 48(sp)
+; RV32I-NEXT:    sw a0, 48(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 44(s1)
-; RV32I-NEXT:    sw a0, 44(sp)
+; RV32I-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 48(s1)
-; RV32I-NEXT:    sw a0, 40(sp)
+; RV32I-NEXT:    sw a0, 40(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 52(s1)
-; RV32I-NEXT:    sw a0, 36(sp)
+; RV32I-NEXT:    sw a0, 36(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 56(s1)
-; RV32I-NEXT:    sw a0, 32(sp)
+; RV32I-NEXT:    sw a0, 32(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 60(s1)
-; RV32I-NEXT:    sw a0, 28(sp)
+; RV32I-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 64(s1)
-; RV32I-NEXT:    sw a0, 24(sp)
+; RV32I-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 68(s1)
-; RV32I-NEXT:    sw a0, 20(sp)
+; RV32I-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 72(s1)
-; RV32I-NEXT:    sw a0, 16(sp)
+; RV32I-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 76(s1)
-; RV32I-NEXT:    sw a0, 12(sp)
+; RV32I-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 80(s1)
-; RV32I-NEXT:    sw a0, 8(sp)
+; RV32I-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a0, 84(s1)
-; RV32I-NEXT:    sw a0, 4(sp)
+; RV32I-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw s4, 88(s1)
 ; RV32I-NEXT:    lw s5, 92(s1)
 ; RV32I-NEXT:    lw s6, 96(s1)
@@ -563,131 +563,131 @@ define void @caller() nounwind {
 ; RV32I-NEXT:    sw s6, 96(s1)
 ; RV32I-NEXT:    sw s5, 92(s1)
 ; RV32I-NEXT:    sw s4, 88(s1)
-; RV32I-NEXT:    lw a0, 4(sp)
+; RV32I-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 84(s1)
-; RV32I-NEXT:    lw a0, 8(sp)
+; RV32I-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 80(s1)
-; RV32I-NEXT:    lw a0, 12(sp)
+; RV32I-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 76(s1)
-; RV32I-NEXT:    lw a0, 16(sp)
+; RV32I-NEXT:    lw a0, 16(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 72(s1)
-; RV32I-NEXT:    lw a0, 20(sp)
+; RV32I-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 68(s1)
-; RV32I-NEXT:    lw a0, 24(sp)
+; RV32I-NEXT:    lw a0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 64(s1)
-; RV32I-NEXT:    lw a0, 28(sp)
+; RV32I-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 60(s1)
-; RV32I-NEXT:    lw a0, 32(sp)
+; RV32I-NEXT:    lw a0, 32(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 56(s1)
-; RV32I-NEXT:    lw a0, 36(sp)
+; RV32I-NEXT:    lw a0, 36(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 52(s1)
-; RV32I-NEXT:    lw a0, 40(sp)
+; RV32I-NEXT:    lw a0, 40(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 48(s1)
-; RV32I-NEXT:    lw a0, 44(sp)
+; RV32I-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 44(s1)
-; RV32I-NEXT:    lw a0, 48(sp)
+; RV32I-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 40(s1)
-; RV32I-NEXT:    lw a0, 52(sp)
+; RV32I-NEXT:    lw a0, 52(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 36(s1)
-; RV32I-NEXT:    lw a0, 56(sp)
+; RV32I-NEXT:    lw a0, 56(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 32(s1)
-; RV32I-NEXT:    lw a0, 60(sp)
+; RV32I-NEXT:    lw a0, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 28(s1)
-; RV32I-NEXT:    lw a0, 64(sp)
+; RV32I-NEXT:    lw a0, 64(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 24(s1)
-; RV32I-NEXT:    lw a0, 68(sp)
+; RV32I-NEXT:    lw a0, 68(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 20(s1)
-; RV32I-NEXT:    lw a0, 72(sp)
+; RV32I-NEXT:    lw a0, 72(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, 16(s1)
-; RV32I-NEXT:    lw a0, 76(sp)
+; RV32I-NEXT:    lw a0, 76(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var+12)(s0)
-; RV32I-NEXT:    lw a0, 80(sp)
+; RV32I-NEXT:    lw a0, 80(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var+8)(s0)
-; RV32I-NEXT:    lw a0, 84(sp)
+; RV32I-NEXT:    lw a0, 84(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var+4)(s0)
-; RV32I-NEXT:    lw a0, 88(sp)
+; RV32I-NEXT:    lw a0, 88(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    sw a0, %lo(var)(s0)
-; RV32I-NEXT:    lw s11, 92(sp)
-; RV32I-NEXT:    lw s10, 96(sp)
-; RV32I-NEXT:    lw s9, 100(sp)
-; RV32I-NEXT:    lw s8, 104(sp)
-; RV32I-NEXT:    lw s7, 108(sp)
-; RV32I-NEXT:    lw s6, 112(sp)
-; RV32I-NEXT:    lw s5, 116(sp)
-; RV32I-NEXT:    lw s4, 120(sp)
-; RV32I-NEXT:    lw s3, 124(sp)
-; RV32I-NEXT:    lw s2, 128(sp)
-; RV32I-NEXT:    lw s1, 132(sp)
-; RV32I-NEXT:    lw s0, 136(sp)
-; RV32I-NEXT:    lw ra, 140(sp)
+; RV32I-NEXT:    lw s11, 92(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s10, 96(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s9, 100(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s8, 104(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s7, 108(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 112(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 116(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 120(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 124(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 128(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 144
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-WITH-FP-LABEL: caller:
 ; RV32I-WITH-FP:       # %bb.0:
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, -144
-; RV32I-WITH-FP-NEXT:    sw ra, 140(sp)
-; RV32I-WITH-FP-NEXT:    sw s0, 136(sp)
-; RV32I-WITH-FP-NEXT:    sw s1, 132(sp)
-; RV32I-WITH-FP-NEXT:    sw s2, 128(sp)
-; RV32I-WITH-FP-NEXT:    sw s3, 124(sp)
-; RV32I-WITH-FP-NEXT:    sw s4, 120(sp)
-; RV32I-WITH-FP-NEXT:    sw s5, 116(sp)
-; RV32I-WITH-FP-NEXT:    sw s6, 112(sp)
-; RV32I-WITH-FP-NEXT:    sw s7, 108(sp)
-; RV32I-WITH-FP-NEXT:    sw s8, 104(sp)
-; RV32I-WITH-FP-NEXT:    sw s9, 100(sp)
-; RV32I-WITH-FP-NEXT:    sw s10, 96(sp)
-; RV32I-WITH-FP-NEXT:    sw s11, 92(sp)
+; RV32I-WITH-FP-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s1, 132(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s2, 128(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s3, 124(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s4, 120(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s5, 116(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s6, 112(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s7, 108(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s8, 104(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s9, 100(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s10, 96(sp) # 4-byte Folded Spill
+; RV32I-WITH-FP-NEXT:    sw s11, 92(sp) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    addi s0, sp, 144
 ; RV32I-WITH-FP-NEXT:    lui s6, %hi(var)
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var)(s6)
-; RV32I-WITH-FP-NEXT:    sw a0, -56(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -56(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var+4)(s6)
-; RV32I-WITH-FP-NEXT:    sw a0, -60(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -60(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var+8)(s6)
-; RV32I-WITH-FP-NEXT:    sw a0, -64(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -64(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, %lo(var+12)(s6)
-; RV32I-WITH-FP-NEXT:    sw a0, -68(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -68(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    addi s1, s6, %lo(var)
 ; RV32I-WITH-FP-NEXT:    lw a0, 16(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -72(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -72(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 20(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -76(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -76(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 24(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -80(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -80(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 28(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -84(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -84(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 32(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -88(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -88(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 36(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -92(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -92(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 40(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -96(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -96(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 44(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -100(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -100(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 48(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -104(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -104(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 52(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -108(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -108(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 56(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -112(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -112(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 60(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -116(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -116(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 64(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -120(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -120(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 68(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -124(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -124(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 72(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -128(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -128(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 76(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -132(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -132(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 80(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -136(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -136(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 84(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -140(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -140(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw a0, 88(s1)
-; RV32I-WITH-FP-NEXT:    sw a0, -144(s0)
+; RV32I-WITH-FP-NEXT:    sw a0, -144(s0) # 4-byte Folded Spill
 ; RV32I-WITH-FP-NEXT:    lw s8, 92(s1)
 ; RV32I-WITH-FP-NEXT:    lw s9, 96(s1)
 ; RV32I-WITH-FP-NEXT:    lw s10, 100(s1)
@@ -707,130 +707,130 @@ define void @caller() nounwind {
 ; RV32I-WITH-FP-NEXT:    sw s10, 100(s1)
 ; RV32I-WITH-FP-NEXT:    sw s9, 96(s1)
 ; RV32I-WITH-FP-NEXT:    sw s8, 92(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -144(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -144(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 88(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -140(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -140(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 84(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -136(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -136(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 80(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -132(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -132(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 76(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -128(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -128(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 72(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -124(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -124(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 68(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -120(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -120(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 64(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -116(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -116(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 60(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -112(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -112(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 56(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -108(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -108(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 52(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -104(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -104(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 48(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -100(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -100(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 44(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -96(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -96(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 40(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -92(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -92(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 36(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -88(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -88(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 32(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -84(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -84(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 28(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -80(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -80(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 24(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -76(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -76(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 20(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -72(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -72(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, 16(s1)
-; RV32I-WITH-FP-NEXT:    lw a0, -68(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -68(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var+12)(s6)
-; RV32I-WITH-FP-NEXT:    lw a0, -64(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -64(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var+8)(s6)
-; RV32I-WITH-FP-NEXT:    lw a0, -60(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -60(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var+4)(s6)
-; RV32I-WITH-FP-NEXT:    lw a0, -56(s0)
+; RV32I-WITH-FP-NEXT:    lw a0, -56(s0) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    sw a0, %lo(var)(s6)
-; RV32I-WITH-FP-NEXT:    lw s11, 92(sp)
-; RV32I-WITH-FP-NEXT:    lw s10, 96(sp)
-; RV32I-WITH-FP-NEXT:    lw s9, 100(sp)
-; RV32I-WITH-FP-NEXT:    lw s8, 104(sp)
-; RV32I-WITH-FP-NEXT:    lw s7, 108(sp)
-; RV32I-WITH-FP-NEXT:    lw s6, 112(sp)
-; RV32I-WITH-FP-NEXT:    lw s5, 116(sp)
-; RV32I-WITH-FP-NEXT:    lw s4, 120(sp)
-; RV32I-WITH-FP-NEXT:    lw s3, 124(sp)
-; RV32I-WITH-FP-NEXT:    lw s2, 128(sp)
-; RV32I-WITH-FP-NEXT:    lw s1, 132(sp)
-; RV32I-WITH-FP-NEXT:    lw s0, 136(sp)
-; RV32I-WITH-FP-NEXT:    lw ra, 140(sp)
+; RV32I-WITH-FP-NEXT:    lw s11, 92(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s10, 96(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s9, 100(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s8, 104(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s7, 108(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s6, 112(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s5, 116(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s4, 120(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s3, 124(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s2, 128(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
+; RV32I-WITH-FP-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; RV32I-WITH-FP-NEXT:    addi sp, sp, 144
 ; RV32I-WITH-FP-NEXT:    ret
 ;
 ; RV64I-LABEL: caller:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -288
-; RV64I-NEXT:    sd ra, 280(sp)
-; RV64I-NEXT:    sd s0, 272(sp)
-; RV64I-NEXT:    sd s1, 264(sp)
-; RV64I-NEXT:    sd s2, 256(sp)
-; RV64I-NEXT:    sd s3, 248(sp)
-; RV64I-NEXT:    sd s4, 240(sp)
-; RV64I-NEXT:    sd s5, 232(sp)
-; RV64I-NEXT:    sd s6, 224(sp)
-; RV64I-NEXT:    sd s7, 216(sp)
-; RV64I-NEXT:    sd s8, 208(sp)
-; RV64I-NEXT:    sd s9, 200(sp)
-; RV64I-NEXT:    sd s10, 192(sp)
-; RV64I-NEXT:    sd s11, 184(sp)
+; RV64I-NEXT:    sd ra, 280(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 272(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 264(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 256(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 248(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 240(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 232(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s6, 224(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s7, 216(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s8, 208(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s9, 200(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s10, 192(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s11, 184(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lui s0, %hi(var)
 ; RV64I-NEXT:    lw a0, %lo(var)(s0)
-; RV64I-NEXT:    sd a0, 176(sp)
+; RV64I-NEXT:    sd a0, 176(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, %lo(var+4)(s0)
-; RV64I-NEXT:    sd a0, 168(sp)
+; RV64I-NEXT:    sd a0, 168(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, %lo(var+8)(s0)
-; RV64I-NEXT:    sd a0, 160(sp)
+; RV64I-NEXT:    sd a0, 160(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, %lo(var+12)(s0)
-; RV64I-NEXT:    sd a0, 152(sp)
+; RV64I-NEXT:    sd a0, 152(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s1, s0, %lo(var)
 ; RV64I-NEXT:    lw a0, 16(s1)
-; RV64I-NEXT:    sd a0, 144(sp)
+; RV64I-NEXT:    sd a0, 144(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 20(s1)
-; RV64I-NEXT:    sd a0, 136(sp)
+; RV64I-NEXT:    sd a0, 136(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 24(s1)
-; RV64I-NEXT:    sd a0, 128(sp)
+; RV64I-NEXT:    sd a0, 128(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 28(s1)
-; RV64I-NEXT:    sd a0, 120(sp)
+; RV64I-NEXT:    sd a0, 120(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 32(s1)
-; RV64I-NEXT:    sd a0, 112(sp)
+; RV64I-NEXT:    sd a0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 36(s1)
-; RV64I-NEXT:    sd a0, 104(sp)
+; RV64I-NEXT:    sd a0, 104(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 40(s1)
-; RV64I-NEXT:    sd a0, 96(sp)
+; RV64I-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 44(s1)
-; RV64I-NEXT:    sd a0, 88(sp)
+; RV64I-NEXT:    sd a0, 88(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 48(s1)
-; RV64I-NEXT:    sd a0, 80(sp)
+; RV64I-NEXT:    sd a0, 80(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 52(s1)
-; RV64I-NEXT:    sd a0, 72(sp)
+; RV64I-NEXT:    sd a0, 72(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 56(s1)
-; RV64I-NEXT:    sd a0, 64(sp)
+; RV64I-NEXT:    sd a0, 64(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 60(s1)
-; RV64I-NEXT:    sd a0, 56(sp)
+; RV64I-NEXT:    sd a0, 56(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 64(s1)
-; RV64I-NEXT:    sd a0, 48(sp)
+; RV64I-NEXT:    sd a0, 48(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 68(s1)
-; RV64I-NEXT:    sd a0, 40(sp)
+; RV64I-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 72(s1)
-; RV64I-NEXT:    sd a0, 32(sp)
+; RV64I-NEXT:    sd a0, 32(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 76(s1)
-; RV64I-NEXT:    sd a0, 24(sp)
+; RV64I-NEXT:    sd a0, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 80(s1)
-; RV64I-NEXT:    sd a0, 16(sp)
+; RV64I-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw a0, 84(s1)
-; RV64I-NEXT:    sd a0, 8(sp)
+; RV64I-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lw s4, 88(s1)
 ; RV64I-NEXT:    lw s5, 92(s1)
 ; RV64I-NEXT:    lw s6, 96(s1)
@@ -852,131 +852,131 @@ define void @caller() nounwind {
 ; RV64I-NEXT:    sw s6, 96(s1)
 ; RV64I-NEXT:    sw s5, 92(s1)
 ; RV64I-NEXT:    sw s4, 88(s1)
-; RV64I-NEXT:    ld a0, 8(sp)
+; RV64I-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 84(s1)
-; RV64I-NEXT:    ld a0, 16(sp)
+; RV64I-NEXT:    ld a0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 80(s1)
-; RV64I-NEXT:    ld a0, 24(sp)
+; RV64I-NEXT:    ld a0, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 76(s1)
-; RV64I-NEXT:    ld a0, 32(sp)
+; RV64I-NEXT:    ld a0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 72(s1)
-; RV64I-NEXT:    ld a0, 40(sp)
+; RV64I-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 68(s1)
-; RV64I-NEXT:    ld a0, 48(sp)
+; RV64I-NEXT:    ld a0, 48(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 64(s1)
-; RV64I-NEXT:    ld a0, 56(sp)
+; RV64I-NEXT:    ld a0, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 60(s1)
-; RV64I-NEXT:    ld a0, 64(sp)
+; RV64I-NEXT:    ld a0, 64(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 56(s1)
-; RV64I-NEXT:    ld a0, 72(sp)
+; RV64I-NEXT:    ld a0, 72(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 52(s1)
-; RV64I-NEXT:    ld a0, 80(sp)
+; RV64I-NEXT:    ld a0, 80(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 48(s1)
-; RV64I-NEXT:    ld a0, 88(sp)
+; RV64I-NEXT:    ld a0, 88(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 44(s1)
-; RV64I-NEXT:    ld a0, 96(sp)
+; RV64I-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 40(s1)
-; RV64I-NEXT:    ld a0, 104(sp)
+; RV64I-NEXT:    ld a0, 104(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 36(s1)
-; RV64I-NEXT:    ld a0, 112(sp)
+; RV64I-NEXT:    ld a0, 112(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 32(s1)
-; RV64I-NEXT:    ld a0, 120(sp)
+; RV64I-NEXT:    ld a0, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 28(s1)
-; RV64I-NEXT:    ld a0, 128(sp)
+; RV64I-NEXT:    ld a0, 128(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 24(s1)
-; RV64I-NEXT:    ld a0, 136(sp)
+; RV64I-NEXT:    ld a0, 136(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 20(s1)
-; RV64I-NEXT:    ld a0, 144(sp)
+; RV64I-NEXT:    ld a0, 144(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, 16(s1)
-; RV64I-NEXT:    ld a0, 152(sp)
+; RV64I-NEXT:    ld a0, 152(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var+12)(s0)
-; RV64I-NEXT:    ld a0, 160(sp)
+; RV64I-NEXT:    ld a0, 160(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var+8)(s0)
-; RV64I-NEXT:    ld a0, 168(sp)
+; RV64I-NEXT:    ld a0, 168(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var+4)(s0)
-; RV64I-NEXT:    ld a0, 176(sp)
+; RV64I-NEXT:    ld a0, 176(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    sw a0, %lo(var)(s0)
-; RV64I-NEXT:    ld s11, 184(sp)
-; RV64I-NEXT:    ld s10, 192(sp)
-; RV64I-NEXT:    ld s9, 200(sp)
-; RV64I-NEXT:    ld s8, 208(sp)
-; RV64I-NEXT:    ld s7, 216(sp)
-; RV64I-NEXT:    ld s6, 224(sp)
-; RV64I-NEXT:    ld s5, 232(sp)
-; RV64I-NEXT:    ld s4, 240(sp)
-; RV64I-NEXT:    ld s3, 248(sp)
-; RV64I-NEXT:    ld s2, 256(sp)
-; RV64I-NEXT:    ld s1, 264(sp)
-; RV64I-NEXT:    ld s0, 272(sp)
-; RV64I-NEXT:    ld ra, 280(sp)
+; RV64I-NEXT:    ld s11, 184(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s10, 192(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s9, 200(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s8, 208(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s7, 216(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s6, 224(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 232(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 240(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 248(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 256(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 264(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 272(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 288
 ; RV64I-NEXT:    ret
 ;
 ; RV64I-WITH-FP-LABEL: caller:
 ; RV64I-WITH-FP:       # %bb.0:
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, -288
-; RV64I-WITH-FP-NEXT:    sd ra, 280(sp)
-; RV64I-WITH-FP-NEXT:    sd s0, 272(sp)
-; RV64I-WITH-FP-NEXT:    sd s1, 264(sp)
-; RV64I-WITH-FP-NEXT:    sd s2, 256(sp)
-; RV64I-WITH-FP-NEXT:    sd s3, 248(sp)
-; RV64I-WITH-FP-NEXT:    sd s4, 240(sp)
-; RV64I-WITH-FP-NEXT:    sd s5, 232(sp)
-; RV64I-WITH-FP-NEXT:    sd s6, 224(sp)
-; RV64I-WITH-FP-NEXT:    sd s7, 216(sp)
-; RV64I-WITH-FP-NEXT:    sd s8, 208(sp)
-; RV64I-WITH-FP-NEXT:    sd s9, 200(sp)
-; RV64I-WITH-FP-NEXT:    sd s10, 192(sp)
-; RV64I-WITH-FP-NEXT:    sd s11, 184(sp)
+; RV64I-WITH-FP-NEXT:    sd ra, 280(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s0, 272(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s1, 264(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s2, 256(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s3, 248(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s4, 240(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s5, 232(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s6, 224(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s7, 216(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s8, 208(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s9, 200(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s10, 192(sp) # 8-byte Folded Spill
+; RV64I-WITH-FP-NEXT:    sd s11, 184(sp) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    addi s0, sp, 288
 ; RV64I-WITH-FP-NEXT:    lui s6, %hi(var)
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var)(s6)
-; RV64I-WITH-FP-NEXT:    sd a0, -112(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -112(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var+4)(s6)
-; RV64I-WITH-FP-NEXT:    sd a0, -120(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -120(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var+8)(s6)
-; RV64I-WITH-FP-NEXT:    sd a0, -128(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -128(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, %lo(var+12)(s6)
-; RV64I-WITH-FP-NEXT:    sd a0, -136(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -136(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    addi s1, s6, %lo(var)
 ; RV64I-WITH-FP-NEXT:    lw a0, 16(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -144(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -144(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 20(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -152(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -152(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 24(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -160(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -160(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 28(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -168(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -168(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 32(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -176(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -176(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 36(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -184(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -184(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 40(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -192(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -192(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 44(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -200(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -200(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 48(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -208(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -208(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 52(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -216(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -216(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 56(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -224(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -224(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 60(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -232(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -232(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 64(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -240(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -240(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 68(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -248(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -248(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 72(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -256(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -256(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 76(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -264(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -264(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 80(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -272(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -272(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 84(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -280(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -280(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw a0, 88(s1)
-; RV64I-WITH-FP-NEXT:    sd a0, -288(s0)
+; RV64I-WITH-FP-NEXT:    sd a0, -288(s0) # 8-byte Folded Spill
 ; RV64I-WITH-FP-NEXT:    lw s8, 92(s1)
 ; RV64I-WITH-FP-NEXT:    lw s9, 96(s1)
 ; RV64I-WITH-FP-NEXT:    lw s10, 100(s1)
@@ -996,65 +996,65 @@ define void @caller() nounwind {
 ; RV64I-WITH-FP-NEXT:    sw s10, 100(s1)
 ; RV64I-WITH-FP-NEXT:    sw s9, 96(s1)
 ; RV64I-WITH-FP-NEXT:    sw s8, 92(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -288(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -288(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 88(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -280(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -280(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 84(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -272(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -272(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 80(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -264(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -264(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 76(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -256(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -256(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 72(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -248(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -248(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 68(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -240(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -240(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 64(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -232(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -232(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 60(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -224(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -224(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 56(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -216(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -216(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 52(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -208(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -208(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 48(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -200(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -200(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 44(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -192(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -192(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 40(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -184(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -184(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 36(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -176(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -176(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 32(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -168(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -168(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 28(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -160(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -160(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 24(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -152(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -152(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 20(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -144(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -144(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, 16(s1)
-; RV64I-WITH-FP-NEXT:    ld a0, -136(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -136(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var+12)(s6)
-; RV64I-WITH-FP-NEXT:    ld a0, -128(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -128(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var+8)(s6)
-; RV64I-WITH-FP-NEXT:    ld a0, -120(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -120(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var+4)(s6)
-; RV64I-WITH-FP-NEXT:    ld a0, -112(s0)
+; RV64I-WITH-FP-NEXT:    ld a0, -112(s0) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    sw a0, %lo(var)(s6)
-; RV64I-WITH-FP-NEXT:    ld s11, 184(sp)
-; RV64I-WITH-FP-NEXT:    ld s10, 192(sp)
-; RV64I-WITH-FP-NEXT:    ld s9, 200(sp)
-; RV64I-WITH-FP-NEXT:    ld s8, 208(sp)
-; RV64I-WITH-FP-NEXT:    ld s7, 216(sp)
-; RV64I-WITH-FP-NEXT:    ld s6, 224(sp)
-; RV64I-WITH-FP-NEXT:    ld s5, 232(sp)
-; RV64I-WITH-FP-NEXT:    ld s4, 240(sp)
-; RV64I-WITH-FP-NEXT:    ld s3, 248(sp)
-; RV64I-WITH-FP-NEXT:    ld s2, 256(sp)
-; RV64I-WITH-FP-NEXT:    ld s1, 264(sp)
-; RV64I-WITH-FP-NEXT:    ld s0, 272(sp)
-; RV64I-WITH-FP-NEXT:    ld ra, 280(sp)
+; RV64I-WITH-FP-NEXT:    ld s11, 184(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s10, 192(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s9, 200(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s8, 208(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s7, 216(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s6, 224(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s5, 232(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s4, 240(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s3, 248(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s2, 256(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s1, 264(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld s0, 272(sp) # 8-byte Folded Reload
+; RV64I-WITH-FP-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
 ; RV64I-WITH-FP-NEXT:    addi sp, sp, 288
 ; RV64I-WITH-FP-NEXT:    ret
 

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
index 284476fffe05..6125bcfc2902 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
@@ -21,33 +21,33 @@ define i32 @callee_double_in_regs(i32 %a, double %b) nounwind {
 ; RV32I-FPELIM-LABEL: callee_double_in_regs:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
-; RV32I-FPELIM-NEXT:    sw s0, 8(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-FPELIM-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    mv s0, a0
 ; RV32I-FPELIM-NEXT:    mv a0, a1
 ; RV32I-FPELIM-NEXT:    mv a1, a2
-; RV32I-FPELIM-NEXT:    call __fixdfsi
+; RV32I-FPELIM-NEXT:    call __fixdfsi@plt
 ; RV32I-FPELIM-NEXT:    add a0, s0, a0
-; RV32I-FPELIM-NEXT:    lw s0, 8(sp)
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: callee_double_in_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    sw s1, 4(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    mv s1, a0
 ; RV32I-WITHFP-NEXT:    mv a0, a1
 ; RV32I-WITHFP-NEXT:    mv a1, a2
-; RV32I-WITHFP-NEXT:    call __fixdfsi
+; RV32I-WITHFP-NEXT:    call __fixdfsi@plt
 ; RV32I-WITHFP-NEXT:    add a0, s1, a0
-; RV32I-WITHFP-NEXT:    lw s1, 4(sp)
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %b_fptosi = fptosi double %b to i32
@@ -59,27 +59,27 @@ define i32 @caller_double_in_regs() nounwind {
 ; RV32I-FPELIM-LABEL: caller_double_in_regs:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV32I-FPELIM-NEXT:    lui a2, 262144
 ; RV32I-FPELIM-NEXT:    mv a1, zero
 ; RV32I-FPELIM-NEXT:    call callee_double_in_regs
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_double_in_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 1
 ; RV32I-WITHFP-NEXT:    lui a2, 262144
 ; RV32I-WITHFP-NEXT:    mv a1, zero
 ; RV32I-WITHFP-NEXT:    call callee_double_in_regs
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_double_in_regs(i32 1, double 2.0)
@@ -108,8 +108,8 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; RV32I-WITHFP-LABEL: callee_aligned_stack:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw a0, 0(a2)
 ; RV32I-WITHFP-NEXT:    lw a1, 20(s0)
@@ -121,8 +121,8 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; RV32I-WITHFP-NEXT:    add a0, a0, a3
 ; RV32I-WITHFP-NEXT:    add a0, a0, a4
 ; RV32I-WITHFP-NEXT:    add a0, a0, a1
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = bitcast fp128 %c to i128
@@ -144,7 +144,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-LABEL: caller_aligned_stack:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -64
-; RV32I-FPELIM-NEXT:    sw ra, 60(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 18
 ; RV32I-FPELIM-NEXT:    sw a0, 24(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 17
@@ -181,15 +181,15 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    addi a7, zero, 14
 ; RV32I-FPELIM-NEXT:    sw t0, 32(sp)
 ; RV32I-FPELIM-NEXT:    call callee_aligned_stack
-; RV32I-FPELIM-NEXT:    lw ra, 60(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_aligned_stack:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -64
-; RV32I-WITHFP-NEXT:    sw ra, 60(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 56(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 64
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 18
 ; RV32I-WITHFP-NEXT:    sw a0, 24(sp)
@@ -227,8 +227,8 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    addi a7, zero, 14
 ; RV32I-WITHFP-NEXT:    sw t0, -32(s0)
 ; RV32I-WITHFP-NEXT:    call callee_aligned_stack
-; RV32I-WITHFP-NEXT:    lw s0, 56(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 60(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 64
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_aligned_stack(i32 1, i32 11,
@@ -248,13 +248,13 @@ define double @callee_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-LABEL: callee_small_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a1, 261888
 ; RV32I-WITHFP-NEXT:    mv a0, zero
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   ret double 1.0
@@ -264,21 +264,21 @@ define i64 @caller_small_scalar_ret() nounwind {
 ; RV32I-FPELIM-LABEL: caller_small_scalar_ret:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    call callee_small_scalar_ret
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_small_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    call callee_small_scalar_ret
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call double @callee_small_scalar_ret()

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 150d786f4c3b..f76c35252f02 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -33,12 +33,12 @@ define i32 @callee_i64_in_regs(i32 %a, i64 %b) nounwind {
 ; RV32I-WITHFP-LABEL: callee_i64_in_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    add a0, a0, a1
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %b_trunc = trunc i64 %b to i32
@@ -50,27 +50,27 @@ define i32 @caller_i64_in_regs() nounwind {
 ; RV32I-FPELIM-LABEL: caller_i64_in_regs:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV32I-FPELIM-NEXT:    addi a1, zero, 2
 ; RV32I-FPELIM-NEXT:    mv a2, zero
 ; RV32I-FPELIM-NEXT:    call callee_i64_in_regs
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_i64_in_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 1
 ; RV32I-WITHFP-NEXT:    addi a1, zero, 2
 ; RV32I-WITHFP-NEXT:    mv a2, zero
 ; RV32I-WITHFP-NEXT:    call callee_i64_in_regs
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_i64_in_regs(i32 1, i64 2)
@@ -103,8 +103,8 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
 ; RV32I-WITHFP-LABEL: callee_many_scalars:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw t0, 4(s0)
 ; RV32I-WITHFP-NEXT:    lw t1, 0(s0)
@@ -122,8 +122,8 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
 ; RV32I-WITHFP-NEXT:    add a0, a0, a5
 ; RV32I-WITHFP-NEXT:    add a0, a0, a6
 ; RV32I-WITHFP-NEXT:    add a0, a0, t0
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %a_ext = zext i8 %a to i32
@@ -143,7 +143,7 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-FPELIM-LABEL: caller_many_scalars:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 8
 ; RV32I-FPELIM-NEXT:    sw a0, 4(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 1
@@ -156,15 +156,15 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-FPELIM-NEXT:    sw zero, 0(sp)
 ; RV32I-FPELIM-NEXT:    mv a4, zero
 ; RV32I-FPELIM-NEXT:    call callee_many_scalars
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_many_scalars:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 8
 ; RV32I-WITHFP-NEXT:    sw a0, 4(sp)
@@ -178,8 +178,8 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-WITHFP-NEXT:    sw zero, 0(sp)
 ; RV32I-WITHFP-NEXT:    mv a4, zero
 ; RV32I-WITHFP-NEXT:    call callee_many_scalars
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i32 5, i32 6, i64 7, i32 8)
@@ -213,8 +213,8 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) nounwind {
 ; RV32I-WITHFP-LABEL: callee_large_scalars:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw a6, 0(a1)
 ; RV32I-WITHFP-NEXT:    lw a7, 0(a0)
@@ -232,8 +232,8 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) nounwind {
 ; RV32I-WITHFP-NEXT:    or a0, a1, a0
 ; RV32I-WITHFP-NEXT:    or a0, a0, a2
 ; RV32I-WITHFP-NEXT:    seqz a0, a0
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %b_bitcast = bitcast fp128 %b to i128
@@ -246,7 +246,7 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-FPELIM-LABEL: caller_large_scalars:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -48
-; RV32I-FPELIM-NEXT:    sw ra, 44(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    lui a0, 524272
 ; RV32I-FPELIM-NEXT:    sw a0, 12(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 8(sp)
@@ -260,15 +260,15 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-FPELIM-NEXT:    mv a1, sp
 ; RV32I-FPELIM-NEXT:    sw a2, 24(sp)
 ; RV32I-FPELIM-NEXT:    call callee_large_scalars
-; RV32I-FPELIM-NEXT:    lw ra, 44(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 48
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_large_scalars:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -48
-; RV32I-WITHFP-NEXT:    sw ra, 44(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 40(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 48
 ; RV32I-WITHFP-NEXT:    lui a0, 524272
 ; RV32I-WITHFP-NEXT:    sw a0, -36(s0)
@@ -283,8 +283,8 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-WITHFP-NEXT:    addi a1, s0, -48
 ; RV32I-WITHFP-NEXT:    sw a2, -24(s0)
 ; RV32I-WITHFP-NEXT:    call callee_large_scalars
-; RV32I-WITHFP-NEXT:    lw s0, 40(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 44(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 48
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_large_scalars(i128 1, fp128 0xL00000000000000007FFF000000000000)
@@ -320,8 +320,8 @@ define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d,
 ; RV32I-WITHFP-LABEL: callee_large_scalars_exhausted_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw a0, 4(s0)
 ; RV32I-WITHFP-NEXT:    lw a6, 0(a0)
@@ -340,8 +340,8 @@ define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d,
 ; RV32I-WITHFP-NEXT:    or a0, a2, a0
 ; RV32I-WITHFP-NEXT:    or a0, a0, a1
 ; RV32I-WITHFP-NEXT:    seqz a0, a0
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %j_bitcast = bitcast fp128 %j to i128
@@ -354,7 +354,7 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-FPELIM-LABEL: caller_large_scalars_exhausted_regs:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -64
-; RV32I-FPELIM-NEXT:    sw ra, 60(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 16
 ; RV32I-FPELIM-NEXT:    sw a0, 4(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 9
@@ -378,15 +378,15 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-FPELIM-NEXT:    addi a7, sp, 40
 ; RV32I-FPELIM-NEXT:    sw t0, 40(sp)
 ; RV32I-FPELIM-NEXT:    call callee_large_scalars_exhausted_regs
-; RV32I-FPELIM-NEXT:    lw ra, 60(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_large_scalars_exhausted_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -64
-; RV32I-WITHFP-NEXT:    sw ra, 60(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 56(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 64
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -48
 ; RV32I-WITHFP-NEXT:    sw a0, 4(sp)
@@ -411,8 +411,8 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    addi a7, s0, -24
 ; RV32I-WITHFP-NEXT:    sw t0, -24(s0)
 ; RV32I-WITHFP-NEXT:    call callee_large_scalars_exhausted_regs
-; RV32I-WITHFP-NEXT:    lw s0, 56(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 60(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 64
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_large_scalars_exhausted_regs(
@@ -427,29 +427,29 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
 ; RV32I-FPELIM-LABEL: caller_mixed_scalar_libcalls:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -32
-; RV32I-FPELIM-NEXT:    sw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    mv a2, a1
 ; RV32I-FPELIM-NEXT:    mv a1, a0
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 8
-; RV32I-FPELIM-NEXT:    call __floatditf
+; RV32I-FPELIM-NEXT:    call __floatditf@plt
 ; RV32I-FPELIM-NEXT:    lw a0, 8(sp)
-; RV32I-FPELIM-NEXT:    lw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_mixed_scalar_libcalls:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -32
-; RV32I-WITHFP-NEXT:    sw ra, 28(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 24(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV32I-WITHFP-NEXT:    mv a2, a1
 ; RV32I-WITHFP-NEXT:    mv a1, a0
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -24
-; RV32I-WITHFP-NEXT:    call __floatditf
+; RV32I-WITHFP-NEXT:    call __floatditf@plt
 ; RV32I-WITHFP-NEXT:    lw a0, -24(s0)
-; RV32I-WITHFP-NEXT:    lw s0, 24(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 28(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 32
 ; RV32I-WITHFP-NEXT:    ret
   %1 = sitofp i64 %a to fp128
@@ -472,13 +472,13 @@ define i32 @callee_small_coerced_struct([2 x i32] %a.coerce) nounwind {
 ; RV32I-WITHFP-LABEL: callee_small_coerced_struct:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    xor a0, a0, a1
 ; RV32I-WITHFP-NEXT:    seqz a0, a0
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = extractvalue [2 x i32] %a.coerce, 0
@@ -492,25 +492,25 @@ define i32 @caller_small_coerced_struct() nounwind {
 ; RV32I-FPELIM-LABEL: caller_small_coerced_struct:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV32I-FPELIM-NEXT:    addi a1, zero, 2
 ; RV32I-FPELIM-NEXT:    call callee_small_coerced_struct
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_small_coerced_struct:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 1
 ; RV32I-WITHFP-NEXT:    addi a1, zero, 2
 ; RV32I-WITHFP-NEXT:    call callee_small_coerced_struct
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_small_coerced_struct([2 x i32] [i32 1, i32 2])
@@ -532,14 +532,14 @@ define i32 @callee_large_struct(%struct.large* byval(%struct.large) align 4 %a)
 ; RV32I-WITHFP-LABEL: callee_large_struct:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw a1, 0(a0)
 ; RV32I-WITHFP-NEXT:    lw a0, 12(a0)
 ; RV32I-WITHFP-NEXT:    add a0, a1, a0
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = getelementptr inbounds %struct.large, %struct.large* %a, i32 0, i32 0
@@ -554,7 +554,7 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-FPELIM-LABEL: caller_large_struct:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -48
-; RV32I-FPELIM-NEXT:    sw ra, 44(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV32I-FPELIM-NEXT:    sw a0, 24(sp)
 ; RV32I-FPELIM-NEXT:    addi a1, zero, 2
@@ -569,15 +569,15 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-FPELIM-NEXT:    sw a3, 20(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 8
 ; RV32I-FPELIM-NEXT:    call callee_large_struct
-; RV32I-FPELIM-NEXT:    lw ra, 44(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 48
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_large_struct:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -48
-; RV32I-WITHFP-NEXT:    sw ra, 44(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 40(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 48
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 1
 ; RV32I-WITHFP-NEXT:    sw a0, -24(s0)
@@ -593,8 +593,8 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-WITHFP-NEXT:    sw a3, -28(s0)
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -40
 ; RV32I-WITHFP-NEXT:    call callee_large_struct
-; RV32I-WITHFP-NEXT:    lw s0, 40(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 44(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 48
 ; RV32I-WITHFP-NEXT:    ret
   %ls = alloca %struct.large, align 4
@@ -633,8 +633,8 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; RV32I-WITHFP-LABEL: callee_aligned_stack:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw a0, 0(a2)
 ; RV32I-WITHFP-NEXT:    lw a1, 20(s0)
@@ -646,8 +646,8 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; RV32I-WITHFP-NEXT:    add a0, a0, a3
 ; RV32I-WITHFP-NEXT:    add a0, a0, a4
 ; RV32I-WITHFP-NEXT:    add a0, a0, a1
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = bitcast fp128 %c to i128
@@ -668,7 +668,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-LABEL: caller_aligned_stack:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -64
-; RV32I-FPELIM-NEXT:    sw ra, 60(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 19
 ; RV32I-FPELIM-NEXT:    sw a0, 24(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 18
@@ -702,15 +702,15 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    addi a7, zero, 14
 ; RV32I-FPELIM-NEXT:    sw t0, 32(sp)
 ; RV32I-FPELIM-NEXT:    call callee_aligned_stack
-; RV32I-FPELIM-NEXT:    lw ra, 60(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_aligned_stack:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -64
-; RV32I-WITHFP-NEXT:    sw ra, 60(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 56(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 64
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 19
 ; RV32I-WITHFP-NEXT:    sw a0, 24(sp)
@@ -745,8 +745,8 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    addi a7, zero, 14
 ; RV32I-WITHFP-NEXT:    sw t0, -32(s0)
 ; RV32I-WITHFP-NEXT:    call callee_aligned_stack
-; RV32I-WITHFP-NEXT:    lw s0, 56(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 60(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 64
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_aligned_stack(i32 1, i32 11,
@@ -769,14 +769,14 @@ define i64 @callee_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-LABEL: callee_small_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a0, 466866
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 1677
 ; RV32I-WITHFP-NEXT:    addi a1, zero, 287
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   ret i64 1234567898765
@@ -786,7 +786,7 @@ define i32 @caller_small_scalar_ret() nounwind {
 ; RV32I-FPELIM-LABEL: caller_small_scalar_ret:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    call callee_small_scalar_ret
 ; RV32I-FPELIM-NEXT:    lui a2, 56
 ; RV32I-FPELIM-NEXT:    addi a2, a2, 580
@@ -796,15 +796,15 @@ define i32 @caller_small_scalar_ret() nounwind {
 ; RV32I-FPELIM-NEXT:    xor a0, a0, a2
 ; RV32I-FPELIM-NEXT:    or a0, a0, a1
 ; RV32I-FPELIM-NEXT:    seqz a0, a0
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_small_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    call callee_small_scalar_ret
 ; RV32I-WITHFP-NEXT:    lui a2, 56
@@ -815,8 +815,8 @@ define i32 @caller_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    xor a0, a0, a2
 ; RV32I-WITHFP-NEXT:    or a0, a0, a1
 ; RV32I-WITHFP-NEXT:    seqz a0, a0
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i64 @callee_small_scalar_ret()
@@ -837,13 +837,13 @@ define %struct.small @callee_small_struct_ret() nounwind {
 ; RV32I-WITHFP-LABEL: callee_small_struct_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 1
 ; RV32I-WITHFP-NEXT:    mv a1, zero
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   ret %struct.small { i32 1, i32* null }
@@ -853,23 +853,23 @@ define i32 @caller_small_struct_ret() nounwind {
 ; RV32I-FPELIM-LABEL: caller_small_struct_ret:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    call callee_small_struct_ret
 ; RV32I-FPELIM-NEXT:    add a0, a0, a1
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_small_struct_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    call callee_small_struct_ret
 ; RV32I-WITHFP-NEXT:    add a0, a0, a1
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call %struct.small @callee_small_struct_ret()
@@ -895,16 +895,16 @@ define fp128 @callee_large_scalar_ret() nounwind {
 ; RV32I-WITHFP-LABEL: callee_large_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a1, 524272
 ; RV32I-WITHFP-NEXT:    sw a1, 12(a0)
 ; RV32I-WITHFP-NEXT:    sw zero, 8(a0)
 ; RV32I-WITHFP-NEXT:    sw zero, 4(a0)
 ; RV32I-WITHFP-NEXT:    sw zero, 0(a0)
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   ret fp128 0xL00000000000000007FFF000000000000
@@ -914,23 +914,23 @@ define void @caller_large_scalar_ret() nounwind {
 ; RV32I-FPELIM-LABEL: caller_large_scalar_ret:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -32
-; RV32I-FPELIM-NEXT:    sw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    mv a0, sp
 ; RV32I-FPELIM-NEXT:    call callee_large_scalar_ret
-; RV32I-FPELIM-NEXT:    lw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_large_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -32
-; RV32I-WITHFP-NEXT:    sw ra, 28(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 24(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -32
 ; RV32I-WITHFP-NEXT:    call callee_large_scalar_ret
-; RV32I-WITHFP-NEXT:    lw s0, 24(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 28(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 32
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call fp128 @callee_large_scalar_ret()
@@ -955,8 +955,8 @@ define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large)
 ; RV32I-WITHFP-LABEL: callee_large_struct_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    addi a1, zero, 1
 ; RV32I-WITHFP-NEXT:    sw a1, 0(a0)
@@ -966,8 +966,8 @@ define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large)
 ; RV32I-WITHFP-NEXT:    sw a1, 8(a0)
 ; RV32I-WITHFP-NEXT:    addi a1, zero, 4
 ; RV32I-WITHFP-NEXT:    sw a1, 12(a0)
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 0
@@ -985,29 +985,29 @@ define i32 @caller_large_struct_ret() nounwind {
 ; RV32I-FPELIM-LABEL: caller_large_struct_ret:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -32
-; RV32I-FPELIM-NEXT:    sw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 8
 ; RV32I-FPELIM-NEXT:    call callee_large_struct_ret
 ; RV32I-FPELIM-NEXT:    lw a0, 8(sp)
 ; RV32I-FPELIM-NEXT:    lw a1, 20(sp)
 ; RV32I-FPELIM-NEXT:    add a0, a0, a1
-; RV32I-FPELIM-NEXT:    lw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_large_struct_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -32
-; RV32I-WITHFP-NEXT:    sw ra, 28(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 24(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -24
 ; RV32I-WITHFP-NEXT:    call callee_large_struct_ret
 ; RV32I-WITHFP-NEXT:    lw a0, -24(s0)
 ; RV32I-WITHFP-NEXT:    lw a1, -12(s0)
 ; RV32I-WITHFP-NEXT:    add a0, a0, a1
-; RV32I-WITHFP-NEXT:    lw s0, 24(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 28(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 32
 ; RV32I-WITHFP-NEXT:    ret
   %1 = alloca %struct.large

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
index 8ff3de799084..500a158b4961 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
@@ -16,31 +16,31 @@ define i32 @callee_float_in_regs(i32 %a, float %b) nounwind {
 ; RV32I-FPELIM-LABEL: callee_float_in_regs:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
-; RV32I-FPELIM-NEXT:    sw s0, 8(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-FPELIM-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    mv s0, a0
 ; RV32I-FPELIM-NEXT:    mv a0, a1
-; RV32I-FPELIM-NEXT:    call __fixsfsi
+; RV32I-FPELIM-NEXT:    call __fixsfsi@plt
 ; RV32I-FPELIM-NEXT:    add a0, s0, a0
-; RV32I-FPELIM-NEXT:    lw s0, 8(sp)
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: callee_float_in_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    sw s1, 4(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    mv s1, a0
 ; RV32I-WITHFP-NEXT:    mv a0, a1
-; RV32I-WITHFP-NEXT:    call __fixsfsi
+; RV32I-WITHFP-NEXT:    call __fixsfsi@plt
 ; RV32I-WITHFP-NEXT:    add a0, s1, a0
-; RV32I-WITHFP-NEXT:    lw s1, 4(sp)
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %b_fptosi = fptosi float %b to i32
@@ -52,25 +52,25 @@ define i32 @caller_float_in_regs() nounwind {
 ; RV32I-FPELIM-LABEL: caller_float_in_regs:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV32I-FPELIM-NEXT:    lui a1, 262144
 ; RV32I-FPELIM-NEXT:    call callee_float_in_regs
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_float_in_regs:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 1
 ; RV32I-WITHFP-NEXT:    lui a1, 262144
 ; RV32I-WITHFP-NEXT:    call callee_float_in_regs
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_float_in_regs(i32 1, float 2.0)
@@ -87,13 +87,13 @@ define i32 @callee_float_on_stack(i64 %a, i64 %b, i64 %c, i64 %d, float %e) noun
 ; RV32I-WITHFP-LABEL: callee_float_on_stack:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw a0, 0(s0)
 ; RV32I-WITHFP-NEXT:    add a0, a6, a0
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = trunc i64 %d to i32
@@ -106,7 +106,7 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-FPELIM-LABEL: caller_float_on_stack:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    lui a1, 264704
 ; RV32I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV32I-FPELIM-NEXT:    addi a2, zero, 2
@@ -118,15 +118,15 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    mv a5, zero
 ; RV32I-FPELIM-NEXT:    mv a7, zero
 ; RV32I-FPELIM-NEXT:    call callee_float_on_stack
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_float_on_stack:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a1, 264704
 ; RV32I-WITHFP-NEXT:    addi a0, zero, 1
@@ -139,8 +139,8 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    mv a5, zero
 ; RV32I-WITHFP-NEXT:    mv a7, zero
 ; RV32I-WITHFP-NEXT:    call callee_float_on_stack
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call i32 @callee_float_on_stack(i64 1, i64 2, i64 3, i64 4, float 5.0)
@@ -156,12 +156,12 @@ define float @callee_tiny_scalar_ret() nounwind {
 ; RV32I-WITHFP-LABEL: callee_tiny_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a0, 260096
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   ret float 1.0
@@ -171,21 +171,21 @@ define i32 @caller_tiny_scalar_ret() nounwind {
 ; RV32I-FPELIM-LABEL: caller_tiny_scalar_ret:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
-; RV32I-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    call callee_tiny_scalar_ret
-; RV32I-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: caller_tiny_scalar_ret:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -16
-; RV32I-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    call callee_tiny_scalar_ret
-; RV32I-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32I-WITHFP-NEXT:    ret
   %1 = call float @callee_tiny_scalar_ret()

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
index f70c03d2cb48..80b0d9695feb 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
@@ -21,12 +21,12 @@ define i32 @caller_double_in_fpr() nounwind {
 ; RV32-ILP32D-LABEL: caller_double_in_fpr:
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
-; RV32-ILP32D-NEXT:    sw ra, 12(sp)
+; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32-ILP32D-NEXT:    fld fa0, %lo(.LCPI1_0)(a0)
 ; RV32-ILP32D-NEXT:    addi a0, zero, 1
 ; RV32-ILP32D-NEXT:    call callee_double_in_fpr
-; RV32-ILP32D-NEXT:    lw ra, 12(sp)
+; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
   %1 = call i32 @callee_double_in_fpr(i32 1, double 2.0)
@@ -50,7 +50,7 @@ define i32 @caller_double_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32D-LABEL: caller_double_in_fpr_exhausted_gprs:
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
-; RV32-ILP32D-NEXT:    sw ra, 12(sp)
+; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32D-NEXT:    addi a1, zero, 5
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32-ILP32D-NEXT:    fld fa0, %lo(.LCPI3_0)(a0)
@@ -64,7 +64,7 @@ define i32 @caller_double_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32D-NEXT:    mv a5, zero
 ; RV32-ILP32D-NEXT:    mv a7, zero
 ; RV32-ILP32D-NEXT:    call callee_double_in_fpr_exhausted_gprs
-; RV32-ILP32D-NEXT:    lw ra, 12(sp)
+; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
   %1 = call i32 @callee_double_in_fpr_exhausted_gprs(
@@ -95,7 +95,7 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32D-LABEL: caller_double_in_gpr_exhausted_fprs:
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
-; RV32-ILP32D-NEXT:    sw ra, 12(sp)
+; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI5_0)
 ; RV32-ILP32D-NEXT:    fld fa0, %lo(.LCPI5_0)(a0)
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI5_1)
@@ -115,7 +115,7 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32D-NEXT:    lui a1, 262688
 ; RV32-ILP32D-NEXT:    mv a0, zero
 ; RV32-ILP32D-NEXT:    call callee_double_in_gpr_exhausted_fprs
-; RV32-ILP32D-NEXT:    lw ra, 12(sp)
+; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
   %1 = call i32 @callee_double_in_gpr_exhausted_fprs(
@@ -146,7 +146,7 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
 ; RV32-ILP32D-LABEL: caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs:
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
-; RV32-ILP32D-NEXT:    sw ra, 12(sp)
+; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32D-NEXT:    lui a1, 262816
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI7_0)
 ; RV32-ILP32D-NEXT:    fld fa0, %lo(.LCPI7_0)(a0)
@@ -174,7 +174,7 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
 ; RV32-ILP32D-NEXT:    mv a5, zero
 ; RV32-ILP32D-NEXT:    mv a7, zero
 ; RV32-ILP32D-NEXT:    call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs
-; RV32-ILP32D-NEXT:    lw ra, 12(sp)
+; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
   %1 = call i32 @callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs(
@@ -202,7 +202,7 @@ define i32 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32D-LABEL: caller_double_on_stack_exhausted_gprs_fprs:
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
-; RV32-ILP32D-NEXT:    sw ra, 12(sp)
+; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32D-NEXT:    lui a0, 262816
 ; RV32-ILP32D-NEXT:    sw a0, 4(sp)
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI9_0)
@@ -231,7 +231,7 @@ define i32 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32D-NEXT:    mv a5, zero
 ; RV32-ILP32D-NEXT:    mv a7, zero
 ; RV32-ILP32D-NEXT:    call callee_double_on_stack_exhausted_gprs_fprs
-; RV32-ILP32D-NEXT:    lw ra, 12(sp)
+; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
   %1 = call i32 @callee_double_on_stack_exhausted_gprs_fprs(
@@ -253,11 +253,11 @@ define i32 @caller_double_ret() nounwind {
 ; RV32-ILP32D-LABEL: caller_double_ret:
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
-; RV32-ILP32D-NEXT:    sw ra, 12(sp)
+; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32D-NEXT:    call callee_double_ret
 ; RV32-ILP32D-NEXT:    fsd fa0, 0(sp)
 ; RV32-ILP32D-NEXT:    lw a0, 0(sp)
-; RV32-ILP32D-NEXT:    lw ra, 12(sp)
+; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
   %1 = call double @callee_double_ret()

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
index ca90990d8b10..f571c0a7a2e2 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
@@ -24,12 +24,12 @@ define i32 @caller_float_in_fpr() nounwind {
 ; RV32-ILP32FD-LABEL: caller_float_in_fpr:
 ; RV32-ILP32FD:       # %bb.0:
 ; RV32-ILP32FD-NEXT:    addi sp, sp, -16
-; RV32-ILP32FD-NEXT:    sw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32-ILP32FD-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
 ; RV32-ILP32FD-NEXT:    addi a0, zero, 1
 ; RV32-ILP32FD-NEXT:    call callee_float_in_fpr
-; RV32-ILP32FD-NEXT:    lw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
   %1 = call i32 @callee_float_in_fpr(i32 1, float 2.0)
@@ -53,7 +53,7 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32FD-LABEL: caller_float_in_fpr_exhausted_gprs:
 ; RV32-ILP32FD:       # %bb.0:
 ; RV32-ILP32FD-NEXT:    addi sp, sp, -16
-; RV32-ILP32FD-NEXT:    sw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32FD-NEXT:    addi a1, zero, 5
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32-ILP32FD-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
@@ -67,7 +67,7 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32FD-NEXT:    mv a5, zero
 ; RV32-ILP32FD-NEXT:    mv a7, zero
 ; RV32-ILP32FD-NEXT:    call callee_float_in_fpr_exhausted_gprs
-; RV32-ILP32FD-NEXT:    lw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
   %1 = call i32 @callee_float_in_fpr_exhausted_gprs(
@@ -94,7 +94,7 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32FD-LABEL: caller_float_in_gpr_exhausted_fprs:
 ; RV32-ILP32FD:       # %bb.0:
 ; RV32-ILP32FD-NEXT:    addi sp, sp, -16
-; RV32-ILP32FD-NEXT:    sw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI5_0)
 ; RV32-ILP32FD-NEXT:    flw fa0, %lo(.LCPI5_0)(a0)
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI5_1)
@@ -113,7 +113,7 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32FD-NEXT:    flw fa7, %lo(.LCPI5_7)(a0)
 ; RV32-ILP32FD-NEXT:    lui a0, 266496
 ; RV32-ILP32FD-NEXT:    call callee_float_in_gpr_exhausted_fprs
-; RV32-ILP32FD-NEXT:    lw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
   %1 = call i32 @callee_float_in_gpr_exhausted_fprs(
@@ -140,7 +140,7 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32FD-LABEL: caller_float_on_stack_exhausted_gprs_fprs:
 ; RV32-ILP32FD:       # %bb.0:
 ; RV32-ILP32FD-NEXT:    addi sp, sp, -16
-; RV32-ILP32FD-NEXT:    sw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32FD-NEXT:    lui a1, 267520
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI7_0)
 ; RV32-ILP32FD-NEXT:    flw fa0, %lo(.LCPI7_0)(a0)
@@ -168,7 +168,7 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32FD-NEXT:    mv a5, zero
 ; RV32-ILP32FD-NEXT:    mv a7, zero
 ; RV32-ILP32FD-NEXT:    call callee_float_on_stack_exhausted_gprs_fprs
-; RV32-ILP32FD-NEXT:    lw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
   %1 = call i32 @callee_float_on_stack_exhausted_gprs_fprs(
@@ -190,10 +190,10 @@ define i32 @caller_float_ret() nounwind {
 ; RV32-ILP32FD-LABEL: caller_float_ret:
 ; RV32-ILP32FD:       # %bb.0:
 ; RV32-ILP32FD-NEXT:    addi sp, sp, -16
-; RV32-ILP32FD-NEXT:    sw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32FD-NEXT:    call callee_float_ret
 ; RV32-ILP32FD-NEXT:    fmv.x.w a0, fa0
-; RV32-ILP32FD-NEXT:    lw ra, 12(sp)
+; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
 ; RV32-ILP32FD-NEXT:    ret
   %1 = call float @callee_float_ret()

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
index 1f5730c9ab8c..e415b40411c9 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
@@ -13,14 +13,14 @@ define i64 @callee_double_in_regs(i64 %a, double %b) nounwind {
 ; RV64I-LABEL: callee_double_in_regs:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    call __fixdfdi
+; RV64I-NEXT:    call __fixdfdi@plt
 ; RV64I-NEXT:    add a0, s0, a0
-; RV64I-NEXT:    ld s0, 0(sp)
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %b_fptosi = fptosi double %b to i64
@@ -32,12 +32,12 @@ define i64 @caller_double_in_regs() nounwind {
 ; RV64I-LABEL: caller_double_in_regs:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, zero, 1
 ; RV64I-NEXT:    slli a1, a0, 62
 ; RV64I-NEXT:    addi a0, zero, 1
 ; RV64I-NEXT:    call callee_double_in_regs
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call i64 @callee_double_in_regs(i64 1, double 2.0)
@@ -57,9 +57,9 @@ define i64 @caller_double_ret() nounwind {
 ; RV64I-LABEL: caller_double_ret:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call callee_double_ret
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call double @callee_double_ret()

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index 175abe20bdea..f1fdd0de6730 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -31,12 +31,12 @@ define i64 @caller_i128_in_regs() nounwind {
 ; RV64I-LABEL: caller_i128_in_regs:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, zero, 1
 ; RV64I-NEXT:    addi a1, zero, 2
 ; RV64I-NEXT:    mv a2, zero
 ; RV64I-NEXT:    call callee_i128_in_regs
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
@@ -82,7 +82,7 @@ define i32 @caller_many_scalars() nounwind {
 ; RV64I-LABEL: caller_many_scalars:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, zero, 8
 ; RV64I-NEXT:    sd a0, 8(sp)
 ; RV64I-NEXT:    addi a0, zero, 1
@@ -95,7 +95,7 @@ define i32 @caller_many_scalars() nounwind {
 ; RV64I-NEXT:    sd zero, 0(sp)
 ; RV64I-NEXT:    mv a4, zero
 ; RV64I-NEXT:    call callee_many_scalars
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i128 4, i32 5, i32 6, i128 7, i32 8)
@@ -133,7 +133,7 @@ define i64 @caller_large_scalars() nounwind {
 ; RV64I-LABEL: caller_large_scalars:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -80
-; RV64I-NEXT:    sd ra, 72(sp)
+; RV64I-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd zero, 24(sp)
 ; RV64I-NEXT:    sd zero, 16(sp)
 ; RV64I-NEXT:    sd zero, 8(sp)
@@ -147,7 +147,7 @@ define i64 @caller_large_scalars() nounwind {
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    sd a2, 32(sp)
 ; RV64I-NEXT:    call callee_large_scalars
-; RV64I-NEXT:    ld ra, 72(sp)
+; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 80
 ; RV64I-NEXT:    ret
   %1 = call i64 @callee_large_scalars(i256 1, i256 2)
@@ -188,7 +188,7 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV64I-LABEL: caller_large_scalars_exhausted_regs:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -96
-; RV64I-NEXT:    sd ra, 88(sp)
+; RV64I-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, sp, 16
 ; RV64I-NEXT:    sd a0, 8(sp)
 ; RV64I-NEXT:    addi a0, zero, 9
@@ -212,7 +212,7 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV64I-NEXT:    addi a7, sp, 48
 ; RV64I-NEXT:    sd t0, 48(sp)
 ; RV64I-NEXT:    call callee_large_scalars_exhausted_regs
-; RV64I-NEXT:    ld ra, 88(sp)
+; RV64I-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 96
 ; RV64I-NEXT:    ret
   %1 = call i64 @callee_large_scalars_exhausted_regs(
@@ -227,9 +227,9 @@ define i64 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
 ; RV64I-LABEL: caller_mixed_scalar_libcalls:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __floatditf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __floatditf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = sitofp i64 %a to fp128
@@ -259,11 +259,11 @@ define i64 @caller_small_coerced_struct() nounwind {
 ; RV64I-LABEL: caller_small_coerced_struct:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, zero, 1
 ; RV64I-NEXT:    addi a1, zero, 2
 ; RV64I-NEXT:    call callee_small_coerced_struct
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call i64 @callee_small_coerced_struct([2 x i64] [i64 1, i64 2])
@@ -293,7 +293,7 @@ define i64 @caller_large_struct() nounwind {
 ; RV64I-LABEL: caller_large_struct:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -80
-; RV64I-NEXT:    sd ra, 72(sp)
+; RV64I-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, zero, 1
 ; RV64I-NEXT:    sd a0, 40(sp)
 ; RV64I-NEXT:    addi a1, zero, 2
@@ -308,7 +308,7 @@ define i64 @caller_large_struct() nounwind {
 ; RV64I-NEXT:    sd a3, 32(sp)
 ; RV64I-NEXT:    addi a0, sp, 8
 ; RV64I-NEXT:    call callee_large_struct
-; RV64I-NEXT:    ld ra, 72(sp)
+; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 80
 ; RV64I-NEXT:    ret
   %ls = alloca %struct.large, align 8
@@ -359,7 +359,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV64I-LABEL: caller_aligned_stack:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -64
-; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, zero, 12
 ; RV64I-NEXT:    sd a0, 48(sp)
 ; RV64I-NEXT:    addi a0, zero, 11
@@ -380,7 +380,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV64I-NEXT:    sd a6, 0(sp)
 ; RV64I-NEXT:    mv a6, zero
 ; RV64I-NEXT:    call callee_aligned_stack
-; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
 ; RV64I-NEXT:    ret
   %1 = call i64 @callee_aligned_stack(i64 1, i64 2, i64 3, i64 4, i64 5,
@@ -403,13 +403,13 @@ define i64 @caller_small_scalar_ret() nounwind {
 ; RV64I-LABEL: caller_small_scalar_ret:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call callee_small_scalar_ret
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xori a0, a0, -2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call i128 @callee_small_scalar_ret()
@@ -433,10 +433,10 @@ define i64 @caller_small_struct_ret() nounwind {
 ; RV64I-LABEL: caller_small_struct_ret:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call callee_small_struct_ret
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call %struct.small @callee_small_struct_ret()
@@ -467,10 +467,10 @@ define void @caller_large_scalar_ret() nounwind {
 ; RV64I-LABEL: caller_large_scalar_ret:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
 ; RV64I-NEXT:    call callee_large_scalar_ret
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
   %1 = call i256 @callee_large_scalar_ret()
@@ -510,13 +510,13 @@ define i64 @caller_large_struct_ret() nounwind {
 ; RV64I-LABEL: caller_large_struct_ret:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, sp, 8
 ; RV64I-NEXT:    call callee_large_struct_ret
 ; RV64I-NEXT:    ld a0, 8(sp)
 ; RV64I-NEXT:    ld a1, 32(sp)
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
   %1 = alloca %struct.large

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
index 3135fcd162d1..38df7f42a4c5 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
@@ -18,31 +18,31 @@ define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
 ; RV64I-FPELIM-LABEL: callee_float_in_regs:
 ; RV64I-FPELIM:       # %bb.0:
 ; RV64I-FPELIM-NEXT:    addi sp, sp, -16
-; RV64I-FPELIM-NEXT:    sd ra, 8(sp)
-; RV64I-FPELIM-NEXT:    sd s0, 0(sp)
+; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-FPELIM-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-FPELIM-NEXT:    mv s0, a0
 ; RV64I-FPELIM-NEXT:    mv a0, a1
-; RV64I-FPELIM-NEXT:    call __fixsfdi
+; RV64I-FPELIM-NEXT:    call __fixsfdi@plt
 ; RV64I-FPELIM-NEXT:    add a0, s0, a0
-; RV64I-FPELIM-NEXT:    ld s0, 0(sp)
-; RV64I-FPELIM-NEXT:    ld ra, 8(sp)
+; RV64I-FPELIM-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV64I-FPELIM-NEXT:    ret
 ;
 ; RV64I-WITHFP-LABEL: callee_float_in_regs:
 ; RV64I-WITHFP:       # %bb.0:
 ; RV64I-WITHFP-NEXT:    addi sp, sp, -32
-; RV64I-WITHFP-NEXT:    sd ra, 24(sp)
-; RV64I-WITHFP-NEXT:    sd s0, 16(sp)
-; RV64I-WITHFP-NEXT:    sd s1, 8(sp)
+; RV64I-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-WITHFP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV64I-WITHFP-NEXT:    mv s1, a0
 ; RV64I-WITHFP-NEXT:    mv a0, a1
-; RV64I-WITHFP-NEXT:    call __fixsfdi
+; RV64I-WITHFP-NEXT:    call __fixsfdi@plt
 ; RV64I-WITHFP-NEXT:    add a0, s1, a0
-; RV64I-WITHFP-NEXT:    ld s1, 8(sp)
-; RV64I-WITHFP-NEXT:    ld s0, 16(sp)
-; RV64I-WITHFP-NEXT:    ld ra, 24(sp)
+; RV64I-WITHFP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 32
 ; RV64I-WITHFP-NEXT:    ret
   %b_fptosi = fptosi float %b to i64
@@ -54,25 +54,25 @@ define i64 @caller_float_in_regs() nounwind {
 ; RV64I-FPELIM-LABEL: caller_float_in_regs:
 ; RV64I-FPELIM:       # %bb.0:
 ; RV64I-FPELIM-NEXT:    addi sp, sp, -16
-; RV64I-FPELIM-NEXT:    sd ra, 8(sp)
+; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV64I-FPELIM-NEXT:    lui a1, 262144
 ; RV64I-FPELIM-NEXT:    call callee_float_in_regs
-; RV64I-FPELIM-NEXT:    ld ra, 8(sp)
+; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV64I-FPELIM-NEXT:    ret
 ;
 ; RV64I-WITHFP-LABEL: caller_float_in_regs:
 ; RV64I-WITHFP:       # %bb.0:
 ; RV64I-WITHFP-NEXT:    addi sp, sp, -16
-; RV64I-WITHFP-NEXT:    sd ra, 8(sp)
-; RV64I-WITHFP-NEXT:    sd s0, 0(sp)
+; RV64I-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV64I-WITHFP-NEXT:    addi a0, zero, 1
 ; RV64I-WITHFP-NEXT:    lui a1, 262144
 ; RV64I-WITHFP-NEXT:    call callee_float_in_regs
-; RV64I-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64I-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64I-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64I-WITHFP-NEXT:    ret
   %1 = call i64 @callee_float_in_regs(i64 1, float 2.0)
@@ -88,12 +88,12 @@ define i64 @callee_float_on_stack(i128 %a, i128 %b, i128 %c, i128 %d, float %e)
 ; RV64I-WITHFP-LABEL: callee_float_on_stack:
 ; RV64I-WITHFP:       # %bb.0:
 ; RV64I-WITHFP-NEXT:    addi sp, sp, -16
-; RV64I-WITHFP-NEXT:    sd ra, 8(sp)
-; RV64I-WITHFP-NEXT:    sd s0, 0(sp)
+; RV64I-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV64I-WITHFP-NEXT:    lw a0, 0(s0)
-; RV64I-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64I-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64I-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64I-WITHFP-NEXT:    ret
   %1 = trunc i128 %d to i64
@@ -107,7 +107,7 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-FPELIM-LABEL: caller_float_on_stack:
 ; RV64I-FPELIM:       # %bb.0:
 ; RV64I-FPELIM-NEXT:    addi sp, sp, -16
-; RV64I-FPELIM-NEXT:    sd ra, 8(sp)
+; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-FPELIM-NEXT:    lui a1, 264704
 ; RV64I-FPELIM-NEXT:    addi a0, zero, 1
 ; RV64I-FPELIM-NEXT:    addi a2, zero, 2
@@ -119,15 +119,15 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-FPELIM-NEXT:    mv a5, zero
 ; RV64I-FPELIM-NEXT:    mv a7, zero
 ; RV64I-FPELIM-NEXT:    call callee_float_on_stack
-; RV64I-FPELIM-NEXT:    ld ra, 8(sp)
+; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV64I-FPELIM-NEXT:    ret
 ;
 ; RV64I-WITHFP-LABEL: caller_float_on_stack:
 ; RV64I-WITHFP:       # %bb.0:
 ; RV64I-WITHFP-NEXT:    addi sp, sp, -32
-; RV64I-WITHFP-NEXT:    sd ra, 24(sp)
-; RV64I-WITHFP-NEXT:    sd s0, 16(sp)
+; RV64I-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV64I-WITHFP-NEXT:    lui a1, 264704
 ; RV64I-WITHFP-NEXT:    addi a0, zero, 1
@@ -140,8 +140,8 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-WITHFP-NEXT:    mv a5, zero
 ; RV64I-WITHFP-NEXT:    mv a7, zero
 ; RV64I-WITHFP-NEXT:    call callee_float_on_stack
-; RV64I-WITHFP-NEXT:    ld s0, 16(sp)
-; RV64I-WITHFP-NEXT:    ld ra, 24(sp)
+; RV64I-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 32
 ; RV64I-WITHFP-NEXT:    ret
   %1 = call i64 @callee_float_on_stack(i128 1, i128 2, i128 3, i128 4, float 5.0)
@@ -157,12 +157,12 @@ define float @callee_tiny_scalar_ret() nounwind {
 ; RV64I-WITHFP-LABEL: callee_tiny_scalar_ret:
 ; RV64I-WITHFP:       # %bb.0:
 ; RV64I-WITHFP-NEXT:    addi sp, sp, -16
-; RV64I-WITHFP-NEXT:    sd ra, 8(sp)
-; RV64I-WITHFP-NEXT:    sd s0, 0(sp)
+; RV64I-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV64I-WITHFP-NEXT:    lui a0, 260096
-; RV64I-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64I-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64I-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64I-WITHFP-NEXT:    ret
   ret float 1.0
@@ -175,23 +175,23 @@ define i64 @caller_tiny_scalar_ret() nounwind {
 ; RV64I-FPELIM-LABEL: caller_tiny_scalar_ret:
 ; RV64I-FPELIM:       # %bb.0:
 ; RV64I-FPELIM-NEXT:    addi sp, sp, -16
-; RV64I-FPELIM-NEXT:    sd ra, 8(sp)
+; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-FPELIM-NEXT:    call callee_tiny_scalar_ret
 ; RV64I-FPELIM-NEXT:    sext.w a0, a0
-; RV64I-FPELIM-NEXT:    ld ra, 8(sp)
+; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
 ; RV64I-FPELIM-NEXT:    ret
 ;
 ; RV64I-WITHFP-LABEL: caller_tiny_scalar_ret:
 ; RV64I-WITHFP:       # %bb.0:
 ; RV64I-WITHFP-NEXT:    addi sp, sp, -16
-; RV64I-WITHFP-NEXT:    sd ra, 8(sp)
-; RV64I-WITHFP-NEXT:    sd s0, 0(sp)
+; RV64I-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV64I-WITHFP-NEXT:    call callee_tiny_scalar_ret
 ; RV64I-WITHFP-NEXT:    sext.w a0, a0
-; RV64I-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64I-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64I-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64I-WITHFP-NEXT:    ret
   %1 = call float @callee_tiny_scalar_ret()

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
index bc86428d0836..4cad6819532a 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
@@ -31,7 +31,7 @@ define float @caller_onstack_f32_noop(float %a) nounwind {
 ; RV32IF-LABEL: caller_onstack_f32_noop:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    sw a0, 4(sp)
 ; RV32IF-NEXT:    lui a1, 264704
 ; RV32IF-NEXT:    addi a0, zero, 1
@@ -44,7 +44,7 @@ define float @caller_onstack_f32_noop(float %a) nounwind {
 ; RV32IF-NEXT:    mv a5, zero
 ; RV32IF-NEXT:    mv a7, zero
 ; RV32IF-NEXT:    call onstack_f32_noop
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
   %1 = call float @onstack_f32_noop(i64 1, i64 2, i64 3, i64 4, float 5.0, float %a)
@@ -55,7 +55,7 @@ define float @caller_onstack_f32_fadd(float %a, float %b) nounwind {
 ; RV32IF-LABEL: caller_onstack_f32_fadd:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    fadd.s ft2, ft1, ft0
@@ -71,7 +71,7 @@ define float @caller_onstack_f32_fadd(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    mv a5, zero
 ; RV32IF-NEXT:    mv a7, zero
 ; RV32IF-NEXT:    call onstack_f32_noop
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
   %1 = fadd float %a, %b

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
index 6609ab2008e9..5dcb3016f91d 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
@@ -15,9 +15,9 @@ define void @pass_uint8_as_uint8(i8 zeroext %a) nounwind {
 ; RV32I-LABEL: pass_uint8_as_uint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call receive_uint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call receive_uint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   call void @receive_uint8(i8 zeroext %a)
@@ -30,9 +30,9 @@ define zeroext i8 @ret_callresult_uint8_as_uint8() nounwind {
 ; RV32I-LABEL: ret_callresult_uint8_as_uint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_uint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_uint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call zeroext i8 @return_uint8()
@@ -54,11 +54,11 @@ define void @pass_uint8_as_sint8(i8 zeroext %a) nounwind {
 ; RV32I-LABEL: pass_uint8_as_sint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    call receive_sint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call receive_sint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 
@@ -70,11 +70,11 @@ define signext i8 @ret_callresult_uint8_as_sint8() nounwind {
 ; RV32I-LABEL: ret_callresult_uint8_as_sint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_uint8
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_uint8@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call zeroext i8 @return_uint8()
@@ -95,9 +95,9 @@ define void @pass_uint8_as_anyint32(i8 zeroext %a) nounwind {
 ; RV32I-LABEL: pass_uint8_as_anyint32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call receive_anyint32
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call receive_anyint32@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = zext i8 %a to i32
@@ -109,9 +109,9 @@ define signext i32 @ret_callresult_uint8_as_anyint32() nounwind {
 ; RV32I-LABEL: ret_callresult_uint8_as_anyint32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_uint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_uint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call zeroext i8 @return_uint8()
@@ -131,10 +131,10 @@ define void @pass_sint8_as_uint8(i8 signext %a) nounwind {
 ; RV32I-LABEL: pass_sint8_as_uint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    call receive_uint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call receive_uint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   call void @receive_uint8(i8 zeroext %a)
@@ -147,10 +147,10 @@ define zeroext i8 @ret_callresult_sint8_as_uint8() nounwind {
 ; RV32I-LABEL: ret_callresult_sint8_as_uint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_sint8
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_sint8@plt
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call signext i8 @return_sint8()
@@ -168,9 +168,9 @@ define void @pass_sint8_as_sint8(i8 signext %a) nounwind {
 ; RV32I-LABEL: pass_sint8_as_sint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call receive_sint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call receive_sint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   call void @receive_sint8(i8 signext %a)
@@ -181,9 +181,9 @@ define signext i8 @ret_callresult_sint8_as_sint8() nounwind {
 ; RV32I-LABEL: ret_callresult_sint8_as_sint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_sint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_sint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call signext i8 @return_sint8()
@@ -202,9 +202,9 @@ define void @pass_sint8_as_anyint32(i8 signext %a) nounwind {
 ; RV32I-LABEL: pass_sint8_as_anyint32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call receive_anyint32
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call receive_anyint32@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = sext i8 %a to i32
@@ -216,9 +216,9 @@ define signext i32 @ret_callresult_sint8_as_anyint32() nounwind {
 ; RV32I-LABEL: ret_callresult_sint8_as_anyint32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_sint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_sint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call signext i8 @return_sint8()
@@ -239,10 +239,10 @@ define void @pass_anyint32_as_uint8(i32 signext %a) nounwind {
 ; RV32I-LABEL: pass_anyint32_as_uint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    call receive_uint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call receive_uint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = trunc i32 %a to i8
@@ -256,10 +256,10 @@ define zeroext i8 @ret_callresult_anyint32_as_uint8() nounwind {
 ; RV32I-LABEL: ret_callresult_anyint32_as_uint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_anyint32
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_anyint32@plt
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call signext i32 @return_anyint32()
@@ -281,11 +281,11 @@ define void @pass_anyint32_as_sint8(i32 signext %a) nounwind {
 ; RV32I-LABEL: pass_anyint32_as_sint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    call receive_sint8
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call receive_sint8@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = trunc i32 %a to i8
@@ -297,11 +297,11 @@ define signext i8 @ret_callresult_anyint32_as_sint8() nounwind {
 ; RV32I-LABEL: ret_callresult_anyint32_as_sint8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_anyint32
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_anyint32@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call signext i32 @return_anyint32()
@@ -320,9 +320,9 @@ define void @pass_anyint32_as_anyint32(i32 signext %a) nounwind {
 ; RV32I-LABEL: pass_anyint32_as_anyint32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call receive_anyint32
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call receive_anyint32@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   call void @receive_anyint32(i32 signext %a)
@@ -333,9 +333,9 @@ define signext i32 @ret_callresult_anyint32_as_anyint32() nounwind {
 ; RV32I-LABEL: ret_callresult_anyint32_as_anyint32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call return_anyint32
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call return_anyint32@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
   %1 = call signext i32 @return_anyint32()

diff --git a/llvm/test/CodeGen/RISCV/calls.ll b/llvm/test/CodeGen/RISCV/calls.ll
index 801b298da0dc..91644e6d7e27 100644
--- a/llvm/test/CodeGen/RISCV/calls.ll
+++ b/llvm/test/CodeGen/RISCV/calls.ll
@@ -10,18 +10,18 @@ define i32 @test_call_external(i32 %a) nounwind {
 ; RV32I-LABEL: test_call_external:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call external_function
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call external_function@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-PIC-LABEL: test_call_external:
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
-; RV32I-PIC-NEXT:    sw ra, 12(sp)
+; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    call external_function@plt
-; RV32I-PIC-NEXT:    lw ra, 12(sp)
+; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
   %1 = call i32 @external_function(i32 %a)
@@ -34,18 +34,18 @@ define i32 @test_call_dso_local(i32 %a) nounwind {
 ; RV32I-LABEL: test_call_dso_local:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call dso_local_function
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-PIC-LABEL: test_call_dso_local:
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
-; RV32I-PIC-NEXT:    sw ra, 12(sp)
+; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    call dso_local_function
-; RV32I-PIC-NEXT:    lw ra, 12(sp)
+; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
   %1 = call i32 @dso_local_function(i32 %a)
@@ -70,18 +70,18 @@ define i32 @test_call_defined(i32 %a) nounwind {
 ; RV32I-LABEL: test_call_defined:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call defined_function
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-PIC-LABEL: test_call_defined:
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
-; RV32I-PIC-NEXT:    sw ra, 12(sp)
+; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    call defined_function@plt
-; RV32I-PIC-NEXT:    lw ra, 12(sp)
+; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
   %1 = call i32 @defined_function(i32 %a)
@@ -92,22 +92,22 @@ define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind {
 ; RV32I-LABEL: test_call_indirect:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    jalr a2
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-PIC-LABEL: test_call_indirect:
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
-; RV32I-PIC-NEXT:    sw ra, 12(sp)
+; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    mv a2, a0
 ; RV32I-PIC-NEXT:    mv a0, a1
 ; RV32I-PIC-NEXT:    jalr a2
-; RV32I-PIC-NEXT:    lw ra, 12(sp)
+; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
   %1 = call i32 %a(i32 %b)
@@ -135,26 +135,26 @@ define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: test_call_fastcc:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    call fastcc_function
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-PIC-LABEL: test_call_fastcc:
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
-; RV32I-PIC-NEXT:    sw ra, 12(sp)
-; RV32I-PIC-NEXT:    sw s0, 8(sp)
+; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-PIC-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    mv s0, a0
 ; RV32I-PIC-NEXT:    call fastcc_function@plt
 ; RV32I-PIC-NEXT:    mv a0, s0
-; RV32I-PIC-NEXT:    lw s0, 8(sp)
-; RV32I-PIC-NEXT:    lw ra, 12(sp)
+; RV32I-PIC-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
   %1 = call fastcc i32 @fastcc_function(i32 %a, i32 %b)
@@ -167,8 +167,8 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
 ; RV32I-LABEL: test_call_external_many_args:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    sw a0, 4(sp)
 ; RV32I-NEXT:    sw a0, 0(sp)
@@ -179,18 +179,18 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
 ; RV32I-NEXT:    mv a5, a0
 ; RV32I-NEXT:    mv a6, a0
 ; RV32I-NEXT:    mv a7, a0
-; RV32I-NEXT:    call external_many_args
+; RV32I-NEXT:    call external_many_args@plt
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-PIC-LABEL: test_call_external_many_args:
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
-; RV32I-PIC-NEXT:    sw ra, 12(sp)
-; RV32I-PIC-NEXT:    sw s0, 8(sp)
+; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-PIC-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    mv s0, a0
 ; RV32I-PIC-NEXT:    sw a0, 4(sp)
 ; RV32I-PIC-NEXT:    sw a0, 0(sp)
@@ -203,8 +203,8 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
 ; RV32I-PIC-NEXT:    mv a7, a0
 ; RV32I-PIC-NEXT:    call external_many_args@plt
 ; RV32I-PIC-NEXT:    mv a0, s0
-; RV32I-PIC-NEXT:    lw s0, 8(sp)
-; RV32I-PIC-NEXT:    lw ra, 12(sp)
+; RV32I-PIC-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
   %1 = call i32 @external_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a,
@@ -232,7 +232,7 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
 ; RV32I-LABEL: test_call_defined_many_args:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a0, 4(sp)
 ; RV32I-NEXT:    sw a0, 0(sp)
 ; RV32I-NEXT:    mv a1, a0
@@ -243,14 +243,14 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
 ; RV32I-NEXT:    mv a6, a0
 ; RV32I-NEXT:    mv a7, a0
 ; RV32I-NEXT:    call defined_many_args
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32I-PIC-LABEL: test_call_defined_many_args:
 ; RV32I-PIC:       # %bb.0:
 ; RV32I-PIC-NEXT:    addi sp, sp, -16
-; RV32I-PIC-NEXT:    sw ra, 12(sp)
+; RV32I-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-PIC-NEXT:    sw a0, 4(sp)
 ; RV32I-PIC-NEXT:    sw a0, 0(sp)
 ; RV32I-PIC-NEXT:    mv a1, a0
@@ -261,7 +261,7 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
 ; RV32I-PIC-NEXT:    mv a6, a0
 ; RV32I-PIC-NEXT:    mv a7, a0
 ; RV32I-PIC-NEXT:    call defined_many_args@plt
-; RV32I-PIC-NEXT:    lw ra, 12(sp)
+; RV32I-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-PIC-NEXT:    addi sp, sp, 16
 ; RV32I-PIC-NEXT:    ret
   %1 = call i32 @defined_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a,

diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index 340fe5c6719c..ad80ae63ca9b 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -300,84 +300,84 @@ define half @fold_demote_h_s(half %a, float %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_s:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    call __gnu_h2f_ieee
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    and a2, s0, a1
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    call __gnu_f2h_ieee
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __gnu_f2h_ieee at plt
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_h_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    call __gnu_h2f_ieee
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    and a2, s0, a1
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    call __gnu_f2h_ieee
-; RV64I-NEXT:    ld s0, 0(sp)
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __gnu_f2h_ieee@plt
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_demote_h_s:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    fsw fs0, 8(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa1
-; RV32IF-NEXT:    call __gnu_f2h_ieee
-; RV32IF-NEXT:    call __gnu_h2f_ieee
+; RV32IF-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IF-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32IF-NEXT:    fsgnj.s fa0, fa0, fs0
-; RV32IF-NEXT:    flw fs0, 8(sp)
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: fold_demote_h_s:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    fsd fs0, 0(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.s fs0, fa1
-; RV32IFD-NEXT:    call __gnu_f2h_ieee
-; RV32IFD-NEXT:    call __gnu_h2f_ieee
+; RV32IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IFD-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32IFD-NEXT:    fsgnj.s fa0, fa0, fs0
-; RV32IFD-NEXT:    fld fs0, 0(sp)
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fold_demote_h_s:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    fsd fs0, 0(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.s fs0, fa1
-; RV64IFD-NEXT:    call __gnu_f2h_ieee
-; RV64IFD-NEXT:    call __gnu_h2f_ieee
+; RV64IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV64IFD-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64IFD-NEXT:    fsgnj.s fa0, fa0, fs0
-; RV64IFD-NEXT:    fld fs0, 0(sp)
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
@@ -407,34 +407,34 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    call __gnu_h2f_ieee
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    and a2, s0, a1
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    call __gnu_f2h_ieee
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __gnu_f2h_ieee@plt
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_h_d:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    call __gnu_h2f_ieee
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
@@ -443,54 +443,54 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV64I-NEXT:    and a1, s0, a1
 ; RV64I-NEXT:    srli a1, a1, 32
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    call __gnu_f2h_ieee
-; RV64I-NEXT:    ld s0, 0(sp)
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __gnu_f2h_ieee@plt
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_demote_h_d:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    sw s0, 8(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv s0, a1
-; RV32IF-NEXT:    call __gnu_f2h_ieee
-; RV32IF-NEXT:    call __gnu_h2f_ieee
+; RV32IF-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IF-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32IF-NEXT:    fmv.w.x ft0, s0
 ; RV32IF-NEXT:    fsgnj.s fa0, fa0, ft0
-; RV32IF-NEXT:    lw s0, 8(sp)
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: fold_demote_h_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    fsd fs0, 0(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa1
-; RV32IFD-NEXT:    call __gnu_f2h_ieee
-; RV32IFD-NEXT:    call __gnu_h2f_ieee
+; RV32IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IFD-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32IFD-NEXT:    fcvt.s.d ft0, fs0
 ; RV32IFD-NEXT:    fsgnj.s fa0, fa0, ft0
-; RV32IFD-NEXT:    fld fs0, 0(sp)
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fold_demote_h_d:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    fsd fs0, 0(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs0, fa1
-; RV64IFD-NEXT:    call __gnu_f2h_ieee
-; RV64IFD-NEXT:    call __gnu_h2f_ieee
+; RV64IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV64IFD-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64IFD-NEXT:    fcvt.s.d ft0, fs0
 ; RV64IFD-NEXT:    fsgnj.s fa0, fa0, ft0
-; RV64IFD-NEXT:    fld fs0, 0(sp)
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index e3504b927864..fea3149ba230 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -12,9 +12,9 @@ define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: udiv:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __udivsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -26,13 +26,13 @@ define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: udiv:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    call __udivdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -48,10 +48,10 @@ define i32 @udiv_constant(i32 %a) nounwind {
 ; RV32I-LABEL: udiv_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 5
-; RV32I-NEXT:    call __udivsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -66,12 +66,12 @@ define i32 @udiv_constant(i32 %a) nounwind {
 ; RV64I-LABEL: udiv_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __udivdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -122,27 +122,27 @@ define i64 @udiv64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: udiv64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __udivdi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __udivdi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: udiv64:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    sw ra, 12(sp)
-; RV32IM-NEXT:    call __udivdi3
-; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    call __udivdi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: udiv64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __udivdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -158,32 +158,32 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV32I-LABEL: udiv64_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __udivdi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __udivdi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: udiv64_constant:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    sw ra, 12(sp)
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    addi a2, zero, 5
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __udivdi3
-; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    call __udivdi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: udiv64_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __udivdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -208,9 +208,9 @@ define i32 @sdiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sdiv:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __divsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -222,11 +222,11 @@ define i32 @sdiv(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: sdiv:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __divdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -242,10 +242,10 @@ define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV32I-LABEL: sdiv_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 5
-; RV32I-NEXT:    call __divsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -262,11 +262,11 @@ define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV64I-LABEL: sdiv_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __divdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -332,27 +332,27 @@ define i64 @sdiv64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: sdiv64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __divdi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __divdi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: sdiv64:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    sw ra, 12(sp)
-; RV32IM-NEXT:    call __divdi3
-; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    call __divdi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: sdiv64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __divdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -368,32 +368,32 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV32I-LABEL: sdiv64_constant:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 5
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __divdi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __divdi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: sdiv64_constant:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    sw ra, 12(sp)
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    addi a2, zero, 5
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __divdi3
-; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    call __divdi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: sdiv64_constant:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 5
-; RV64I-NEXT:    call __divdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -423,35 +423,35 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sdiv64_sext_operands:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    srai a3, a2, 31
-; RV32I-NEXT:    call __divdi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __divdi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: sdiv64_sext_operands:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    sw ra, 12(sp)
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    mv a2, a1
 ; RV32IM-NEXT:    srai a1, a0, 31
 ; RV32IM-NEXT:    srai a3, a2, 31
-; RV32IM-NEXT:    call __divdi3
-; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    call __divdi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: sdiv64_sext_operands:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __divdi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 356f632a3f34..3325835ddd5c 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -11,28 +11,28 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_false:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, zero, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB0_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.then
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB0_2: # %if.else
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_false:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    addi a0, zero, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB0_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.then
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB0_2: # %if.else
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp false double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -46,7 +46,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_oeq:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -56,26 +56,26 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV32IFD-NEXT:    bnez a0, .LBB1_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB1_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_oeq:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB1_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB1_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -92,7 +92,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_oeq_alt:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -103,27 +103,27 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    beqz a0, .LBB2_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB2_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_oeq_alt:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    beqz a0, .LBB2_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB2_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -137,7 +137,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ogt:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -147,26 +147,26 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    bnez a0, .LBB3_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB3_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ogt:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB3_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB3_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp ogt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -180,7 +180,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_oge:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -190,26 +190,26 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    bnez a0, .LBB4_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB4_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_oge:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB4_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB4_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp oge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -223,7 +223,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_olt:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -233,26 +233,26 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    bnez a0, .LBB5_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB5_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_olt:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB5_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB5_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp olt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -266,7 +266,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ole:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -276,26 +276,26 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    bnez a0, .LBB6_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB6_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ole:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB6_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB6_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp ole double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -310,7 +310,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_one:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -325,16 +325,16 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    bnez a0, .LBB7_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB7_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_one:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft1
@@ -345,11 +345,11 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    bnez a0, .LBB7_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB7_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp one double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -363,7 +363,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ord:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -375,16 +375,16 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    bnez a0, .LBB8_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB8_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ord:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft1
@@ -392,11 +392,11 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    bnez a0, .LBB8_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB8_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp ord double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -410,7 +410,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ueq:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -425,16 +425,16 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    bnez a0, .LBB9_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB9_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ueq:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
@@ -445,11 +445,11 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    bnez a0, .LBB9_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB9_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp ueq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -463,7 +463,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ugt:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -474,27 +474,27 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB10_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB10_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ugt:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB10_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB10_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp ugt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -508,7 +508,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_uge:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -519,27 +519,27 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB11_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB11_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_uge:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB11_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB11_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp uge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -553,7 +553,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ult:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -564,27 +564,27 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB12_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ult:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB12_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp ult double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -598,7 +598,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ule:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -609,27 +609,27 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB13_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB13_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ule:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB13_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB13_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp ule double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -643,7 +643,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_une:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -654,27 +654,27 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB14_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_une:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB14_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp une double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -689,7 +689,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_uno:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft0, 0(sp)
@@ -702,16 +702,16 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    seqz a0, a0
 ; RV32IFD-NEXT:    bnez a0, .LBB15_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB15_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_uno:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft1
@@ -720,11 +720,11 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    seqz a0, a0
 ; RV64IFD-NEXT:    bnez a0, .LBB15_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB15_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp uno double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -738,28 +738,28 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_true:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, zero, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB16_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB16_2: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV64IFD-LABEL: br_fcmp_true:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    addi a0, zero, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB16_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB16_2: # %if.then
-; RV64IFD-NEXT:    call abort
+; RV64IFD-NEXT:    call abort@plt
   %1 = fcmp true double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:

diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index 837cb819ec9b..b95661174419 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -33,7 +33,7 @@ define double @caller_double_inreg() nounwind {
 ; RV32IFD-LABEL: caller_double_inreg:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    lui a0, 262236
 ; RV32IFD-NEXT:    addi a1, a0, 655
 ; RV32IFD-NEXT:    lui a0, 377487
@@ -42,7 +42,7 @@ define double @caller_double_inreg() nounwind {
 ; RV32IFD-NEXT:    addi a3, a2, 655
 ; RV32IFD-NEXT:    mv a2, a0
 ; RV32IFD-NEXT:    call callee_double_inreg
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
   %1 = call double @callee_double_inreg(double 2.720000e+00, double 3.720000e+00)
@@ -74,7 +74,7 @@ define double @caller_double_split_reg_stack() nounwind {
 ; RV32IFD-LABEL: caller_double_split_reg_stack:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    lui a0, 262510
 ; RV32IFD-NEXT:    addi a2, a0, 327
 ; RV32IFD-NEXT:    lui a0, 262446
@@ -89,7 +89,7 @@ define double @caller_double_split_reg_stack() nounwind {
 ; RV32IFD-NEXT:    mv a4, zero
 ; RV32IFD-NEXT:    mv a7, a5
 ; RV32IFD-NEXT:    call callee_double_split_reg_stack
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
   %1 = call double @callee_double_split_reg_stack(i32 1, i64 2, i64 3, double 4.72, double 5.72)
@@ -116,7 +116,7 @@ define double @caller_double_stack() nounwind {
 ; RV32IFD-LABEL: caller_double_stack:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw ra, 28(sp)
+; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    lui a0, 262510
 ; RV32IFD-NEXT:    addi a0, a0, 327
 ; RV32IFD-NEXT:    sw a0, 4(sp)
@@ -136,7 +136,7 @@ define double @caller_double_stack() nounwind {
 ; RV32IFD-NEXT:    mv a5, zero
 ; RV32IFD-NEXT:    mv a7, zero
 ; RV32IFD-NEXT:    call callee_double_stack
-; RV32IFD-NEXT:    lw ra, 28(sp)
+; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
   %1 = call double @callee_double_stack(i64 1, i64 2, i64 3, i64 4, double 5.72, double 6.72)

diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index f7abdea68ca2..7a27a8e569a9 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -136,9 +136,9 @@ define i64 @fcvt_l_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_l_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call __fixdfdi
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
@@ -155,9 +155,9 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_lu_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call __fixunsdfdi
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
@@ -203,9 +203,9 @@ define double @fcvt_d_l(i64 %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_l:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call __floatdidf
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call __floatdidf@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
@@ -222,9 +222,9 @@ define double @fcvt_d_lu(i64 %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_lu:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call __floatundidf
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call __floatundidf@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/double-frem.ll b/llvm/test/CodeGen/RISCV/double-frem.ll
index 07f84ac11ce2..f89849949014 100644
--- a/llvm/test/CodeGen/RISCV/double-frem.ll
+++ b/llvm/test/CodeGen/RISCV/double-frem.ll
@@ -6,9 +6,9 @@ define double @frem_f64(double %a, double %b) nounwind {
 ; RV32ID-LABEL: frem_f64:
 ; RV32ID:       # %bb.0:
 ; RV32ID-NEXT:    addi sp, sp, -16
-; RV32ID-NEXT:    sw ra, 12(sp)
-; RV32ID-NEXT:    call fmod
-; RV32ID-NEXT:    lw ra, 12(sp)
+; RV32ID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ID-NEXT:    call fmod@plt
+; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    addi sp, sp, 16
 ; RV32ID-NEXT:    ret
   %1 = frem double %a, %b

diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 4126c01cb637..3e7d26732ba1 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -36,19 +36,19 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV32IFD-LABEL: powi_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call __powidf2
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call __powidf2@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: powi_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    sext.w a1, a1
-; RV64IFD-NEXT:    call __powidf2
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    call __powidf2@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.powi.f64(double %a, i32 %b)
@@ -61,18 +61,18 @@ define double @sin_f64(double %a) nounwind {
 ; RV32IFD-LABEL: sin_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call sin
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call sin@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: sin_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call sin
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call sin@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sin.f64(double %a)
@@ -85,18 +85,18 @@ define double @cos_f64(double %a) nounwind {
 ; RV32IFD-LABEL: cos_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call cos
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call cos@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: cos_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call cos
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call cos@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.cos.f64(double %a)
@@ -108,50 +108,50 @@ define double @sincos_f64(double %a) nounwind {
 ; RV32IFD-LABEL: sincos_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw ra, 28(sp)
-; RV32IFD-NEXT:    sw s0, 24(sp)
-; RV32IFD-NEXT:    sw s1, 20(sp)
+; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    mv s0, a1
 ; RV32IFD-NEXT:    mv s1, a0
-; RV32IFD-NEXT:    call sin
+; RV32IFD-NEXT:    call sin@plt
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsd ft0, 0(sp)
+; RV32IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    mv a0, s1
 ; RV32IFD-NEXT:    mv a1, s0
-; RV32IFD-NEXT:    call cos
+; RV32IFD-NEXT:    call cos@plt
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
+; RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
 ; RV32IFD-NEXT:    fsd ft0, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    lw s1, 20(sp)
-; RV32IFD-NEXT:    lw s0, 24(sp)
-; RV32IFD-NEXT:    lw ra, 28(sp)
+; RV32IFD-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: sincos_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -32
-; RV64IFD-NEXT:    sd ra, 24(sp)
-; RV64IFD-NEXT:    sd s0, 16(sp)
+; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv s0, a0
-; RV64IFD-NEXT:    call sin
+; RV64IFD-NEXT:    call sin@plt
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 8(sp)
+; RV64IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    mv a0, s0
-; RV64IFD-NEXT:    call cos
+; RV64IFD-NEXT:    call cos@plt
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fld ft1, 8(sp)
+; RV64IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
-; RV64IFD-NEXT:    ld s0, 16(sp)
-; RV64IFD-NEXT:    ld ra, 24(sp)
+; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 32
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sin.f64(double %a)
@@ -166,18 +166,18 @@ define double @pow_f64(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: pow_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call pow
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call pow@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: pow_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call pow
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call pow@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.pow.f64(double %a, double %b)
@@ -190,18 +190,18 @@ define double @exp_f64(double %a) nounwind {
 ; RV32IFD-LABEL: exp_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call exp
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call exp@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call exp
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call exp@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.exp.f64(double %a)
@@ -214,18 +214,18 @@ define double @exp2_f64(double %a) nounwind {
 ; RV32IFD-LABEL: exp2_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call exp2
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call exp2@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: exp2_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call exp2
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call exp2@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.exp2.f64(double %a)
@@ -238,18 +238,18 @@ define double @log_f64(double %a) nounwind {
 ; RV32IFD-LABEL: log_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call log
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call log@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: log_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call log
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call log@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.log.f64(double %a)
@@ -262,18 +262,18 @@ define double @log10_f64(double %a) nounwind {
 ; RV32IFD-LABEL: log10_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call log10
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call log10@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: log10_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call log10
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call log10@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.log10.f64(double %a)
@@ -286,18 +286,18 @@ define double @log2_f64(double %a) nounwind {
 ; RV32IFD-LABEL: log2_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call log2
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call log2@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: log2_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call log2
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call log2@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.log2.f64(double %a)
@@ -506,18 +506,18 @@ define double @floor_f64(double %a) nounwind {
 ; RV32IFD-LABEL: floor_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call floor
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call floor@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: floor_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call floor
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call floor@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.floor.f64(double %a)
@@ -530,18 +530,18 @@ define double @ceil_f64(double %a) nounwind {
 ; RV32IFD-LABEL: ceil_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call ceil
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call ceil@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: ceil_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call ceil
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call ceil@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.ceil.f64(double %a)
@@ -554,18 +554,18 @@ define double @trunc_f64(double %a) nounwind {
 ; RV32IFD-LABEL: trunc_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call trunc
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call trunc@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: trunc_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call trunc
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call trunc@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.trunc.f64(double %a)
@@ -578,18 +578,18 @@ define double @rint_f64(double %a) nounwind {
 ; RV32IFD-LABEL: rint_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call rint
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call rint@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: rint_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call rint
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call rint@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.rint.f64(double %a)
@@ -602,18 +602,18 @@ define double @nearbyint_f64(double %a) nounwind {
 ; RV32IFD-LABEL: nearbyint_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call nearbyint
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call nearbyint@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: nearbyint_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call nearbyint
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call nearbyint@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.nearbyint.f64(double %a)
@@ -626,18 +626,18 @@ define double @round_f64(double %a) nounwind {
 ; RV32IFD-LABEL: round_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
-; RV32IFD-NEXT:    call round
-; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call round@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: round_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
-; RV64IFD-NEXT:    call round
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call round@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.round.f64(double %a)

diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index ad6a3afc1e62..fa6f791b7591 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -158,36 +158,36 @@ define double @fld_stack(double %a) nounwind {
 ; RV32IFD-LABEL: fld_stack:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw ra, 28(sp)
+; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsd ft0, 0(sp)
+; RV32IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    addi a0, sp, 16
-; RV32IFD-NEXT:    call notdead
+; RV32IFD-NEXT:    call notdead@plt
 ; RV32IFD-NEXT:    fld ft0, 16(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
+; RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fadd.d ft0, ft0, ft1
 ; RV32IFD-NEXT:    fsd ft0, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    lw ra, 28(sp)
+; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fld_stack:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -32
-; RV64IFD-NEXT:    sd ra, 24(sp)
+; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 8(sp)
+; RV64IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    addi a0, sp, 16
-; RV64IFD-NEXT:    call notdead
+; RV64IFD-NEXT:    call notdead@plt
 ; RV64IFD-NEXT:    fld ft0, 16(sp)
-; RV64IFD-NEXT:    fld ft1, 8(sp)
+; RV64IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
-; RV64IFD-NEXT:    ld ra, 24(sp)
+; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 32
 ; RV64IFD-NEXT:    ret
   %1 = alloca double, align 8
@@ -202,7 +202,7 @@ define void @fsd_stack(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fsd_stack:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw ra, 28(sp)
+; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a2, 8(sp)
 ; RV32IFD-NEXT:    sw a3, 12(sp)
 ; RV32IFD-NEXT:    fld ft0, 8(sp)
@@ -212,22 +212,22 @@ define void @fsd_stack(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
 ; RV32IFD-NEXT:    fsd ft0, 16(sp)
 ; RV32IFD-NEXT:    addi a0, sp, 16
-; RV32IFD-NEXT:    call notdead
-; RV32IFD-NEXT:    lw ra, 28(sp)
+; RV32IFD-NEXT:    call notdead@plt
+; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fsd_stack:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
 ; RV64IFD-NEXT:    fsd ft0, 0(sp)
 ; RV64IFD-NEXT:    mv a0, sp
-; RV64IFD-NEXT:    call notdead
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    call notdead@plt
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b ; force store from FPR64

diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index 7c40e96a9e1d..b6969745d854 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -16,7 +16,7 @@ define i32 @main() nounwind {
 ; RV32IFD-LABEL: main:
 ; RV32IFD:       # %bb.0: # %entry
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    lui a1, 262144
 ; RV32IFD-NEXT:    mv a0, zero
 ; RV32IFD-NEXT:    call test
@@ -34,10 +34,10 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:    and a0, a0, a1
 ; RV32IFD-NEXT:    bnez a0, .LBB1_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.then
-; RV32IFD-NEXT:    call abort
+; RV32IFD-NEXT:    call abort@plt
 ; RV32IFD-NEXT:  .LBB1_2: # %if.end
 ; RV32IFD-NEXT:    mv a0, zero
-; RV32IFD-NEXT:    call exit
+; RV32IFD-NEXT:    call exit@plt
 entry:
   %call = call double @test(double 2.000000e+00)
   %cmp = fcmp olt double %call, 2.400000e-01

diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index a595cd8d9e9e..9b283220a332 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -8,7 +8,7 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV32IFD-LABEL: func:
 ; RV32IFD:       # %bb.0: # %entry
 ; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw ra, 28(sp)
+; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 16(sp)
 ; RV32IFD-NEXT:    sw a1, 20(sp)
 ; RV32IFD-NEXT:    fld ft0, 16(sp)
@@ -18,25 +18,25 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV32IFD-NEXT:    fsd ft0, 16(sp)
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    call func
 ; RV32IFD-NEXT:    sw a0, 16(sp)
 ; RV32IFD-NEXT:    sw a1, 20(sp)
 ; RV32IFD-NEXT:    fld ft0, 16(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    fadd.d ft0, ft0, ft1
 ; RV32IFD-NEXT:  .LBB0_2: # %return
 ; RV32IFD-NEXT:    fsd ft0, 16(sp)
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
-; RV32IFD-NEXT:    lw ra, 28(sp)
+; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: func:
 ; RV64IFD:       # %bb.0: # %entry
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    slli a2, a1, 32
 ; RV64IFD-NEXT:    srli a2, a2, 32
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
@@ -44,14 +44,14 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
-; RV64IFD-NEXT:    fsd ft0, 0(sp)
+; RV64IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call func
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fld ft1, 0(sp)
+; RV64IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
 ; RV64IFD-NEXT:  .LBB0_2: # %return
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
-; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
index fafd506bc78e..0215295ded1f 100644
--- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
+++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
@@ -17,9 +17,9 @@ define void @caller(i1* %p) personality i8* bitcast (i32 (...)* @__gxx_personali
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    .cfi_def_cfa_offset 16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    .cfi_offset s0, -8
 ; RV32I-NEXT:    .cfi_offset s1, -12
@@ -28,18 +28,18 @@ define void @caller(i1* %p) personality i8* bitcast (i32 (...)* @__gxx_personali
 ; RV32I-NEXT:  # %bb.1: # %bb2
 ; RV32I-NEXT:  .Ltmp0:
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call bar
+; RV32I-NEXT:    call bar@plt
 ; RV32I-NEXT:  .Ltmp1:
 ; RV32I-NEXT:    j .LBB0_3
 ; RV32I-NEXT:  .LBB0_2: # %bb1
 ; RV32I-NEXT:  .Ltmp2:
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call foo
+; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:  .Ltmp3:
 ; RV32I-NEXT:  .LBB0_3: # %end2
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_4: # %lpad
@@ -48,15 +48,15 @@ define void @caller(i1* %p) personality i8* bitcast (i32 (...)* @__gxx_personali
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call callee
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call _Unwind_Resume
+; RV32I-NEXT:    call _Unwind_Resume@plt
 ;
 ; RV64I-LABEL: caller:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -32
 ; RV64I-NEXT:    .cfi_def_cfa_offset 32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    .cfi_offset s0, -16
 ; RV64I-NEXT:    .cfi_offset s1, -24
@@ -65,18 +65,18 @@ define void @caller(i1* %p) personality i8* bitcast (i32 (...)* @__gxx_personali
 ; RV64I-NEXT:  # %bb.1: # %bb2
 ; RV64I-NEXT:  .Ltmp0:
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call bar
+; RV64I-NEXT:    call bar@plt
 ; RV64I-NEXT:  .Ltmp1:
 ; RV64I-NEXT:    j .LBB0_3
 ; RV64I-NEXT:  .LBB0_2: # %bb1
 ; RV64I-NEXT:  .Ltmp2:
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call foo
+; RV64I-NEXT:    call foo@plt
 ; RV64I-NEXT:  .Ltmp3:
 ; RV64I-NEXT:  .LBB0_3: # %end2
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB0_4: # %lpad
@@ -85,7 +85,7 @@ define void @caller(i1* %p) personality i8* bitcast (i32 (...)* @__gxx_personali
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call callee
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call _Unwind_Resume
+; RV64I-NEXT:    call _Unwind_Resume@plt
 entry:
   %0 = icmp eq i1* %p, null
   br i1 %0, label %bb1, label %bb2

diff --git a/llvm/test/CodeGen/RISCV/fastcc-float.ll b/llvm/test/CodeGen/RISCV/fastcc-float.ll
index a70c26bf62f1..bf66c752e356 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-float.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-float.ll
@@ -17,7 +17,7 @@ define float @caller(<32 x float> %A) nounwind {
 ; CHECK-LABEL: caller:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    sw ra, 60(sp)
+; CHECK-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    flw fa0, 0(a0)
 ; CHECK-NEXT:    flw fa1, 4(a0)
 ; CHECK-NEXT:    flw fa2, 8(a0)
@@ -63,7 +63,7 @@ define float @caller(<32 x float> %A) nounwind {
 ; CHECK-NEXT:    fsw fs1, 4(sp)
 ; CHECK-NEXT:    fsw fs0, 0(sp)
 ; CHECK-NEXT:    call callee
-; CHECK-NEXT:    lw ra, 60(sp)
+; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 64
 ; CHECK-NEXT:    ret
 	%C = call fastcc float @callee(<32 x float> %A)

diff --git a/llvm/test/CodeGen/RISCV/fastcc-int.ll b/llvm/test/CodeGen/RISCV/fastcc-int.ll
index a48639d66265..8b240dc8a635 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-int.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-int.ll
@@ -22,8 +22,8 @@ define i32 @caller(<16 x i32> %A) nounwind {
 ; RV32-LABEL: caller:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp)
-; RV32-NEXT:    sw s0, 24(sp)
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lw t0, 0(a0)
 ; RV32-NEXT:    lw a1, 4(a0)
 ; RV32-NEXT:    lw a2, 8(a0)
@@ -45,16 +45,16 @@ define i32 @caller(<16 x i32> %A) nounwind {
 ; RV32-NEXT:    sw t1, 0(sp)
 ; RV32-NEXT:    mv a0, t0
 ; RV32-NEXT:    call callee
-; RV32-NEXT:    lw s0, 24(sp)
-; RV32-NEXT:    lw ra, 28(sp)
+; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: caller:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -48
-; RV64-NEXT:    sd ra, 40(sp)
-; RV64-NEXT:    sd s0, 32(sp)
+; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    ld t0, 0(a0)
 ; RV64-NEXT:    ld a1, 8(a0)
 ; RV64-NEXT:    ld a2, 16(a0)
@@ -76,8 +76,8 @@ define i32 @caller(<16 x i32> %A) nounwind {
 ; RV64-NEXT:    sd t1, 0(sp)
 ; RV64-NEXT:    mv a0, t0
 ; RV64-NEXT:    call callee
-; RV64-NEXT:    ld s0, 32(sp)
-; RV64-NEXT:    ld ra, 40(sp)
+; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 48
 ; RV64-NEXT:    ret
 	%C = call fastcc i32 @callee(<16 x i32> %A)

diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index d9054fc26fe5..16dce59a461f 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -66,22 +66,22 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV32F-LABEL: bitcast_double_and:
 ; RV32F:       # %bb.0:
 ; RV32F-NEXT:    addi sp, sp, -16
-; RV32F-NEXT:    sw ra, 12(sp)
-; RV32F-NEXT:    sw s0, 8(sp)
-; RV32F-NEXT:    sw s1, 4(sp)
+; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32F-NEXT:    mv s0, a1
 ; RV32F-NEXT:    mv s1, a0
-; RV32F-NEXT:    call __adddf3
+; RV32F-NEXT:    call __adddf3@plt
 ; RV32F-NEXT:    mv a2, a0
 ; RV32F-NEXT:    lui a0, 524288
 ; RV32F-NEXT:    addi a0, a0, -1
 ; RV32F-NEXT:    and a3, a1, a0
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
-; RV32F-NEXT:    call __adddf3
-; RV32F-NEXT:    lw s1, 4(sp)
-; RV32F-NEXT:    lw s0, 8(sp)
-; RV32F-NEXT:    lw ra, 12(sp)
+; RV32F-NEXT:    call __adddf3@plt
+; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    addi sp, sp, 16
 ; RV32F-NEXT:    ret
 ;
@@ -106,18 +106,18 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV64F-LABEL: bitcast_double_and:
 ; RV64F:       # %bb.0:
 ; RV64F-NEXT:    addi sp, sp, -16
-; RV64F-NEXT:    sd ra, 8(sp)
-; RV64F-NEXT:    sd s0, 0(sp)
+; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
-; RV64F-NEXT:    call __adddf3
+; RV64F-NEXT:    call __adddf3@plt
 ; RV64F-NEXT:    addi a1, zero, -1
 ; RV64F-NEXT:    slli a1, a1, 63
 ; RV64F-NEXT:    addi a1, a1, -1
 ; RV64F-NEXT:    and a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0
-; RV64F-NEXT:    call __adddf3
-; RV64F-NEXT:    ld s0, 0(sp)
-; RV64F-NEXT:    ld ra, 8(sp)
+; RV64F-NEXT:    call __adddf3@plt
+; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    addi sp, sp, 16
 ; RV64F-NEXT:    ret
 ;
@@ -191,21 +191,21 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV32F-LABEL: bitcast_double_xor:
 ; RV32F:       # %bb.0:
 ; RV32F-NEXT:    addi sp, sp, -16
-; RV32F-NEXT:    sw ra, 12(sp)
-; RV32F-NEXT:    sw s0, 8(sp)
-; RV32F-NEXT:    sw s1, 4(sp)
+; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32F-NEXT:    mv s0, a1
 ; RV32F-NEXT:    mv s1, a0
-; RV32F-NEXT:    call __muldf3
+; RV32F-NEXT:    call __muldf3@plt
 ; RV32F-NEXT:    mv a2, a0
 ; RV32F-NEXT:    lui a0, 524288
 ; RV32F-NEXT:    xor a3, a1, a0
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
-; RV32F-NEXT:    call __muldf3
-; RV32F-NEXT:    lw s1, 4(sp)
-; RV32F-NEXT:    lw s0, 8(sp)
-; RV32F-NEXT:    lw ra, 12(sp)
+; RV32F-NEXT:    call __muldf3@plt
+; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    addi sp, sp, 16
 ; RV32F-NEXT:    ret
 ;
@@ -230,17 +230,17 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV64F-LABEL: bitcast_double_xor:
 ; RV64F:       # %bb.0:
 ; RV64F-NEXT:    addi sp, sp, -16
-; RV64F-NEXT:    sd ra, 8(sp)
-; RV64F-NEXT:    sd s0, 0(sp)
+; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
-; RV64F-NEXT:    call __muldf3
+; RV64F-NEXT:    call __muldf3@plt
 ; RV64F-NEXT:    addi a1, zero, -1
 ; RV64F-NEXT:    slli a1, a1, 63
 ; RV64F-NEXT:    xor a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0
-; RV64F-NEXT:    call __muldf3
-; RV64F-NEXT:    ld s0, 0(sp)
-; RV64F-NEXT:    ld ra, 8(sp)
+; RV64F-NEXT:    call __muldf3@plt
+; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    addi sp, sp, 16
 ; RV64F-NEXT:    ret
 ;
@@ -317,21 +317,21 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV32F-LABEL: bitcast_double_or:
 ; RV32F:       # %bb.0:
 ; RV32F-NEXT:    addi sp, sp, -16
-; RV32F-NEXT:    sw ra, 12(sp)
-; RV32F-NEXT:    sw s0, 8(sp)
-; RV32F-NEXT:    sw s1, 4(sp)
+; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32F-NEXT:    mv s0, a1
 ; RV32F-NEXT:    mv s1, a0
-; RV32F-NEXT:    call __muldf3
+; RV32F-NEXT:    call __muldf3@plt
 ; RV32F-NEXT:    mv a2, a0
 ; RV32F-NEXT:    lui a0, 524288
 ; RV32F-NEXT:    or a3, a1, a0
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
-; RV32F-NEXT:    call __muldf3
-; RV32F-NEXT:    lw s1, 4(sp)
-; RV32F-NEXT:    lw s0, 8(sp)
-; RV32F-NEXT:    lw ra, 12(sp)
+; RV32F-NEXT:    call __muldf3@plt
+; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32F-NEXT:    addi sp, sp, 16
 ; RV32F-NEXT:    ret
 ;
@@ -357,17 +357,17 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV64F-LABEL: bitcast_double_or:
 ; RV64F:       # %bb.0:
 ; RV64F-NEXT:    addi sp, sp, -16
-; RV64F-NEXT:    sd ra, 8(sp)
-; RV64F-NEXT:    sd s0, 0(sp)
+; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
-; RV64F-NEXT:    call __muldf3
+; RV64F-NEXT:    call __muldf3@plt
 ; RV64F-NEXT:    addi a1, zero, -1
 ; RV64F-NEXT:    slli a1, a1, 63
 ; RV64F-NEXT:    or a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0
-; RV64F-NEXT:    call __muldf3
-; RV64F-NEXT:    ld s0, 0(sp)
-; RV64F-NEXT:    ld ra, 8(sp)
+; RV64F-NEXT:    call __muldf3@plt
+; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64F-NEXT:    addi sp, sp, 16
 ; RV64F-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index dede086fed85..e30f930b6ffb 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -12,28 +12,28 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_false:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    addi a0, zero, 1
 ; RV32IF-NEXT:    bnez a0, .LBB0_2
 ; RV32IF-NEXT:  # %bb.1: # %if.then
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB0_2: # %if.else
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_false:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    addi a0, zero, 1
 ; RV64IF-NEXT:    bnez a0, .LBB0_2
 ; RV64IF-NEXT:  # %bb.1: # %if.then
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB0_2: # %if.else
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp false float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -47,32 +47,32 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_oeq:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
 ; RV32IF-NEXT:    bnez a0, .LBB1_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB1_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_oeq:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB1_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB1_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp oeq float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -89,34 +89,34 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_oeq_alt:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    beqz a0, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB2_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_oeq_alt:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    beqz a0, .LBB2_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB2_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp oeq float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -130,32 +130,32 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ogt:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
 ; RV32IF-NEXT:    bnez a0, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB3_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_ogt:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB3_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB3_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp ogt float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -169,32 +169,32 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_oge:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
 ; RV32IF-NEXT:    bnez a0, .LBB4_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB4_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_oge:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB4_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB4_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp oge float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -208,32 +208,32 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_olt:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
 ; RV32IF-NEXT:    bnez a0, .LBB5_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB5_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_olt:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB5_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB5_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp olt float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -247,32 +247,32 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ole:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
 ; RV32IF-NEXT:    bnez a0, .LBB6_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB6_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_ole:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB6_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB6_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp ole float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -287,7 +287,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_one:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    feq.s a0, ft1, ft1
@@ -298,16 +298,16 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    bnez a0, .LBB7_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB7_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_one:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    feq.s a0, ft1, ft1
@@ -318,11 +318,11 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    bnez a0, .LBB7_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB7_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp one float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -336,7 +336,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ord:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    feq.s a0, ft1, ft1
@@ -344,16 +344,16 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    bnez a0, .LBB8_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB8_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_ord:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    feq.s a0, ft1, ft1
@@ -361,11 +361,11 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    bnez a0, .LBB8_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB8_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp ord float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -379,7 +379,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ueq:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
@@ -390,16 +390,16 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    bnez a0, .LBB9_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB9_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_ueq:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
@@ -410,11 +410,11 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    or a0, a0, a1
 ; RV64IF-NEXT:    bnez a0, .LBB9_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB9_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp ueq float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -428,34 +428,34 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ugt:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB10_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB10_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_ugt:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB10_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB10_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp ugt float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -469,34 +469,34 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_uge:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB11_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_uge:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB11_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB11_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp uge float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -510,34 +510,34 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ult:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB12_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB12_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_ult:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB12_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp ult float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -551,34 +551,34 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ule:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB13_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB13_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_ule:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB13_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB13_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp ule float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -592,34 +592,34 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_une:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB14_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_une:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB14_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp une float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -634,7 +634,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_uno:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    feq.s a0, ft1, ft1
@@ -643,16 +643,16 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    seqz a0, a0
 ; RV32IF-NEXT:    bnez a0, .LBB15_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB15_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_uno:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    feq.s a0, ft1, ft1
@@ -661,11 +661,11 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    seqz a0, a0
 ; RV64IF-NEXT:    bnez a0, .LBB15_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB15_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp uno float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -679,28 +679,28 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_true:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    addi a0, zero, 1
 ; RV32IF-NEXT:    bnez a0, .LBB16_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB16_2: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_true:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    addi a0, zero, 1
 ; RV64IF-NEXT:    bnez a0, .LBB16_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB16_2: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
   %1 = fcmp true float %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -717,58 +717,58 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_store_load_stack_slot:
 ; RV32IF:       # %bb.0: # %entry
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv a0, zero
-; RV32IF-NEXT:    call dummy
+; RV32IF-NEXT:    call dummy@plt
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, zero
-; RV32IF-NEXT:    fsw ft1, 8(sp)
+; RV32IF-NEXT:    fsw ft1, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.1: # %if.end
 ; RV32IF-NEXT:    mv a0, zero
-; RV32IF-NEXT:    call dummy
+; RV32IF-NEXT:    call dummy@plt
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flw ft1, 8(sp)
+; RV32IF-NEXT:    flw ft1, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.2: # %if.end4
 ; RV32IF-NEXT:    mv a0, zero
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB17_3: # %if.then
-; RV32IF-NEXT:    call abort
+; RV32IF-NEXT:    call abort@plt
 ;
 ; RV64IF-LABEL: br_fcmp_store_load_stack_slot:
 ; RV64IF:       # %bb.0: # %entry
 ; RV64IF-NEXT:    addi sp, sp, -32
-; RV64IF-NEXT:    sd ra, 24(sp)
-; RV64IF-NEXT:    sd s0, 16(sp)
+; RV64IF-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, zero
-; RV64IF-NEXT:    fsw ft0, 12(sp)
+; RV64IF-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.x.w s0, ft0
 ; RV64IF-NEXT:    mv a0, s0
-; RV64IF-NEXT:    call dummy
+; RV64IF-NEXT:    call dummy@plt
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flw ft1, 12(sp)
+; RV64IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    feq.s a0, ft0, ft1
 ; RV64IF-NEXT:    beqz a0, .LBB17_3
 ; RV64IF-NEXT:  # %bb.1: # %if.end
 ; RV64IF-NEXT:    mv a0, s0
-; RV64IF-NEXT:    call dummy
+; RV64IF-NEXT:    call dummy@plt
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flw ft1, 12(sp)
+; RV64IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    feq.s a0, ft0, ft1
 ; RV64IF-NEXT:    beqz a0, .LBB17_3
 ; RV64IF-NEXT:  # %bb.2: # %if.end4
 ; RV64IF-NEXT:    mv a0, zero
-; RV64IF-NEXT:    ld s0, 16(sp)
-; RV64IF-NEXT:    ld ra, 24(sp)
+; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 32
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB17_3: # %if.then
-; RV64IF-NEXT:    call abort
+; RV64IF-NEXT:    call abort@plt
 entry:
   %call = call float @dummy(float 0.000000e+00)
   %cmp = fcmp une float %call, 0.000000e+00

diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 35a71c4ee591..c160ae5d8cd5 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -121,9 +121,9 @@ define i64 @fcvt_l_s(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_l_s:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call __fixsfdi
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
@@ -140,9 +140,9 @@ define i64 @fcvt_lu_s(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_lu_s:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call __fixunssfdi
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
@@ -159,9 +159,9 @@ define float @fcvt_s_l(i64 %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_l:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call __floatdisf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call __floatdisf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
@@ -178,9 +178,9 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_lu:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call __floatundisf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call __floatundisf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/float-frem.ll b/llvm/test/CodeGen/RISCV/float-frem.ll
index 95042c5fde6e..8c059e082f4b 100644
--- a/llvm/test/CodeGen/RISCV/float-frem.ll
+++ b/llvm/test/CodeGen/RISCV/float-frem.ll
@@ -6,9 +6,9 @@ define float @frem_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: frem_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call fmodf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call fmodf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
   %1 = frem float %a, %b

diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 85d1564d1a99..eb0d77ed0423 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -34,19 +34,19 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV32IF-LABEL: powi_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call __powisf2
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call __powisf2@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: powi_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    sext.w a1, a1
-; RV64IF-NEXT:    call __powisf2
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    call __powisf2@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.powi.f32(float %a, i32 %b)
@@ -59,18 +59,18 @@ define float @sin_f32(float %a) nounwind {
 ; RV32IF-LABEL: sin_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call sinf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call sinf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: sin_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call sinf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call sinf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.sin.f32(float %a)
@@ -83,18 +83,18 @@ define float @cos_f32(float %a) nounwind {
 ; RV32IF-LABEL: cos_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call cosf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call cosf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: cos_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call cosf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call cosf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.cos.f32(float %a)
@@ -106,40 +106,40 @@ define float @sincos_f32(float %a) nounwind {
 ; RV32IF-LABEL: sincos_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    sw s0, 8(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv s0, a0
-; RV32IF-NEXT:    call sinf
+; RV32IF-NEXT:    call sinf@plt
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsw ft0, 4(sp)
+; RV32IF-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv a0, s0
-; RV32IF-NEXT:    call cosf
+; RV32IF-NEXT:    call cosf@plt
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flw ft1, 4(sp)
+; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
-; RV32IF-NEXT:    lw s0, 8(sp)
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: sincos_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -32
-; RV64IF-NEXT:    sd ra, 24(sp)
-; RV64IF-NEXT:    sd s0, 16(sp)
+; RV64IF-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    mv s0, a0
-; RV64IF-NEXT:    call sinf
+; RV64IF-NEXT:    call sinf@plt
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsw ft0, 12(sp)
+; RV64IF-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    mv a0, s0
-; RV64IF-NEXT:    call cosf
+; RV64IF-NEXT:    call cosf@plt
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flw ft1, 12(sp)
+; RV64IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
-; RV64IF-NEXT:    ld s0, 16(sp)
-; RV64IF-NEXT:    ld ra, 24(sp)
+; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 32
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.sin.f32(float %a)
@@ -154,18 +154,18 @@ define float @pow_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: pow_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call powf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call powf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: pow_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call powf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call powf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.pow.f32(float %a, float %b)
@@ -178,18 +178,18 @@ define float @exp_f32(float %a) nounwind {
 ; RV32IF-LABEL: exp_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call expf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call expf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: exp_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call expf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call expf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.exp.f32(float %a)
@@ -202,18 +202,18 @@ define float @exp2_f32(float %a) nounwind {
 ; RV32IF-LABEL: exp2_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call exp2f
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call exp2f@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: exp2_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call exp2f
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call exp2f@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.exp2.f32(float %a)
@@ -226,18 +226,18 @@ define float @log_f32(float %a) nounwind {
 ; RV32IF-LABEL: log_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call logf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call logf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: log_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call logf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call logf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.log.f32(float %a)
@@ -250,18 +250,18 @@ define float @log10_f32(float %a) nounwind {
 ; RV32IF-LABEL: log10_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call log10f
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call log10f@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: log10_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call log10f
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call log10f@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.log10.f32(float %a)
@@ -274,18 +274,18 @@ define float @log2_f32(float %a) nounwind {
 ; RV32IF-LABEL: log2_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call log2f
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call log2f@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: log2_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call log2f
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call log2f@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.log2.f32(float %a)
@@ -449,18 +449,18 @@ define float @floor_f32(float %a) nounwind {
 ; RV32IF-LABEL: floor_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call floorf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call floorf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: floor_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call floorf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call floorf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.floor.f32(float %a)
@@ -473,18 +473,18 @@ define float @ceil_f32(float %a) nounwind {
 ; RV32IF-LABEL: ceil_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call ceilf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call ceilf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: ceil_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call ceilf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call ceilf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.ceil.f32(float %a)
@@ -497,18 +497,18 @@ define float @trunc_f32(float %a) nounwind {
 ; RV32IF-LABEL: trunc_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call truncf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call truncf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: trunc_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call truncf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call truncf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.trunc.f32(float %a)
@@ -521,18 +521,18 @@ define float @rint_f32(float %a) nounwind {
 ; RV32IF-LABEL: rint_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call rintf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call rintf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: rint_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call rintf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call rintf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.rint.f32(float %a)
@@ -545,18 +545,18 @@ define float @nearbyint_f32(float %a) nounwind {
 ; RV32IF-LABEL: nearbyint_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call nearbyintf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call nearbyintf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: nearbyint_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call nearbyintf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call nearbyintf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.nearbyint.f32(float %a)
@@ -569,18 +569,18 @@ define float @round_f32(float %a) nounwind {
 ; RV32IF-LABEL: round_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    call roundf
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call roundf@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: round_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call roundf
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call roundf@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.round.f32(float %a)

diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index c989235e276f..c609f6c0a88f 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -134,32 +134,32 @@ define float @flw_stack(float %a) nounwind {
 ; RV32IF-LABEL: flw_stack:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsw ft0, 4(sp)
+; RV32IF-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call notdead
+; RV32IF-NEXT:    call notdead@plt
 ; RV32IF-NEXT:    flw ft0, 8(sp)
-; RV32IF-NEXT:    flw ft1, 4(sp)
+; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    fadd.s ft0, ft0, ft1
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: flw_stack:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsw ft0, 0(sp)
+; RV64IF-NEXT:    fsw ft0, 0(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    addi a0, sp, 4
-; RV64IF-NEXT:    call notdead
+; RV64IF-NEXT:    call notdead@plt
 ; RV64IF-NEXT:    flw ft0, 4(sp)
-; RV64IF-NEXT:    flw ft1, 0(sp)
+; RV64IF-NEXT:    flw ft1, 0(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    fadd.s ft0, ft0, ft1
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = alloca float, align 4
@@ -174,28 +174,28 @@ define void @fsw_stack(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fsw_stack:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
 ; RV32IF-NEXT:    fsw ft0, 8(sp)
 ; RV32IF-NEXT:    addi a0, sp, 8
-; RV32IF-NEXT:    call notdead
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    call notdead@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fsw_stack:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
 ; RV64IF-NEXT:    fsw ft0, 4(sp)
 ; RV64IF-NEXT:    addi a0, sp, 4
-; RV64IF-NEXT:    call notdead
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    call notdead@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = fadd float %a, %b ; force store from FPR32

diff --git a/llvm/test/CodeGen/RISCV/fp128.ll b/llvm/test/CodeGen/RISCV/fp128.ll
index 81a19d065ac5..cea4e4064452 100644
--- a/llvm/test/CodeGen/RISCV/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/fp128.ll
@@ -12,7 +12,7 @@ define i32 @test_load_and_cmp() nounwind {
 ; RV32I-LABEL: test_load_and_cmp:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a0, %hi(x)
 ; RV32I-NEXT:    lw a6, %lo(x)(a0)
 ; RV32I-NEXT:    lw a7, %lo(x+4)(a0)
@@ -33,9 +33,9 @@ define i32 @test_load_and_cmp() nounwind {
 ; RV32I-NEXT:    addi a0, sp, 24
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    sw a6, 24(sp)
-; RV32I-NEXT:    call __netf2
+; RV32I-NEXT:    call __netf2@plt
 ; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
   %1 = load fp128, fp128* @x, align 16
@@ -49,7 +49,7 @@ define i32 @test_add_and_fptosi() nounwind {
 ; RV32I-LABEL: test_add_and_fptosi:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -80
-; RV32I-NEXT:    sw ra, 76(sp)
+; RV32I-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a0, %hi(x)
 ; RV32I-NEXT:    lw a6, %lo(x)(a0)
 ; RV32I-NEXT:    lw a7, %lo(x+4)(a0)
@@ -71,7 +71,7 @@ define i32 @test_add_and_fptosi() nounwind {
 ; RV32I-NEXT:    addi a1, sp, 40
 ; RV32I-NEXT:    addi a2, sp, 24
 ; RV32I-NEXT:    sw a6, 40(sp)
-; RV32I-NEXT:    call __addtf3
+; RV32I-NEXT:    call __addtf3@plt
 ; RV32I-NEXT:    lw a1, 56(sp)
 ; RV32I-NEXT:    lw a0, 60(sp)
 ; RV32I-NEXT:    lw a2, 64(sp)
@@ -81,8 +81,8 @@ define i32 @test_add_and_fptosi() nounwind {
 ; RV32I-NEXT:    sw a0, 12(sp)
 ; RV32I-NEXT:    addi a0, sp, 8
 ; RV32I-NEXT:    sw a1, 8(sp)
-; RV32I-NEXT:    call __fixtfsi
-; RV32I-NEXT:    lw ra, 76(sp)
+; RV32I-NEXT:    call __fixtfsi@plt
+; RV32I-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 80
 ; RV32I-NEXT:    ret
   %1 = load fp128, fp128* @x, align 16

diff --git a/llvm/test/CodeGen/RISCV/fp16-promote.ll b/llvm/test/CodeGen/RISCV/fp16-promote.ll
index c99fc1245aae..c7b67b28bb00 100644
--- a/llvm/test/CodeGen/RISCV/fp16-promote.ll
+++ b/llvm/test/CodeGen/RISCV/fp16-promote.ll
@@ -16,10 +16,10 @@ define float @test_fpextend_float(half* %p) nounwind {
 ; CHECK-LABEL: test_fpextend_float:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw ra, 12(sp)
+; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lhu a0, 0(a0)
-; CHECK-NEXT:    call __gnu_h2f_ieee
-; CHECK-NEXT:    lw ra, 12(sp)
+; CHECK-NEXT:    call __gnu_h2f_ieee@plt
+; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %a = load half, half* %p
@@ -31,11 +31,11 @@ define double @test_fpextend_double(half* %p) nounwind {
 ; CHECK-LABEL: test_fpextend_double:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw ra, 12(sp)
+; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lhu a0, 0(a0)
-; CHECK-NEXT:    call __gnu_h2f_ieee
+; CHECK-NEXT:    call __gnu_h2f_ieee@plt
 ; CHECK-NEXT:    fcvt.d.s fa0, fa0
-; CHECK-NEXT:    lw ra, 12(sp)
+; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %a = load half, half* %p
@@ -47,13 +47,13 @@ define void @test_fptrunc_float(float %f, half* %p) nounwind {
 ; CHECK-LABEL: test_fptrunc_float:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw ra, 12(sp)
-; CHECK-NEXT:    sw s0, 8(sp)
+; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a0
-; CHECK-NEXT:    call __gnu_f2h_ieee
+; CHECK-NEXT:    call __gnu_f2h_ieee@plt
 ; CHECK-NEXT:    sh a0, 0(s0)
-; CHECK-NEXT:    lw s0, 8(sp)
-; CHECK-NEXT:    lw ra, 12(sp)
+; CHECK-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %a = fptrunc float %f to half
@@ -65,13 +65,13 @@ define void @test_fptrunc_double(double %d, half* %p) nounwind {
 ; CHECK-LABEL: test_fptrunc_double:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw ra, 12(sp)
-; CHECK-NEXT:    sw s0, 8(sp)
+; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a0
-; CHECK-NEXT:    call __truncdfhf2
+; CHECK-NEXT:    call __truncdfhf2@plt
 ; CHECK-NEXT:    sh a0, 0(s0)
-; CHECK-NEXT:    lw s0, 8(sp)
-; CHECK-NEXT:    lw ra, 12(sp)
+; CHECK-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %a = fptrunc double %d to half
@@ -83,24 +83,24 @@ define void @test_fadd(half* %p, half* %q) nounwind {
 ; CHECK-LABEL: test_fadd:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -32
-; CHECK-NEXT:    sw ra, 28(sp)
-; CHECK-NEXT:    sw s0, 24(sp)
-; CHECK-NEXT:    sw s1, 20(sp)
-; CHECK-NEXT:    fsd fs0, 8(sp)
+; CHECK-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a1
 ; CHECK-NEXT:    mv s1, a0
 ; CHECK-NEXT:    lhu a0, 0(a0)
-; CHECK-NEXT:    call __gnu_h2f_ieee
+; CHECK-NEXT:    call __gnu_h2f_ieee@plt
 ; CHECK-NEXT:    fmv.s fs0, fa0
 ; CHECK-NEXT:    lhu a0, 0(s0)
-; CHECK-NEXT:    call __gnu_h2f_ieee
+; CHECK-NEXT:    call __gnu_h2f_ieee@plt
 ; CHECK-NEXT:    fadd.s fa0, fs0, fa0
-; CHECK-NEXT:    call __gnu_f2h_ieee
+; CHECK-NEXT:    call __gnu_f2h_ieee@plt
 ; CHECK-NEXT:    sh a0, 0(s1)
-; CHECK-NEXT:    fld fs0, 8(sp)
-; CHECK-NEXT:    lw s1, 20(sp)
-; CHECK-NEXT:    lw s0, 24(sp)
-; CHECK-NEXT:    lw ra, 28(sp)
+; CHECK-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 32
 ; CHECK-NEXT:    ret
   %a = load half, half* %p
@@ -114,24 +114,24 @@ define void @test_fmul(half* %p, half* %q) nounwind {
 ; CHECK-LABEL: test_fmul:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -32
-; CHECK-NEXT:    sw ra, 28(sp)
-; CHECK-NEXT:    sw s0, 24(sp)
-; CHECK-NEXT:    sw s1, 20(sp)
-; CHECK-NEXT:    fsd fs0, 8(sp)
+; CHECK-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    mv s0, a1
 ; CHECK-NEXT:    mv s1, a0
 ; CHECK-NEXT:    lhu a0, 0(a0)
-; CHECK-NEXT:    call __gnu_h2f_ieee
+; CHECK-NEXT:    call __gnu_h2f_ieee@plt
 ; CHECK-NEXT:    fmv.s fs0, fa0
 ; CHECK-NEXT:    lhu a0, 0(s0)
-; CHECK-NEXT:    call __gnu_h2f_ieee
+; CHECK-NEXT:    call __gnu_h2f_ieee@plt
 ; CHECK-NEXT:    fmul.s fa0, fs0, fa0
-; CHECK-NEXT:    call __gnu_f2h_ieee
+; CHECK-NEXT:    call __gnu_f2h_ieee@plt
 ; CHECK-NEXT:    sh a0, 0(s1)
-; CHECK-NEXT:    fld fs0, 8(sp)
-; CHECK-NEXT:    lw s1, 20(sp)
-; CHECK-NEXT:    lw s0, 24(sp)
-; CHECK-NEXT:    lw ra, 28(sp)
+; CHECK-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 32
 ; CHECK-NEXT:    ret
   %a = load half, half* %p

diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll
index 0b1c1bde05e5..358b075ee75b 100644
--- a/llvm/test/CodeGen/RISCV/frame-info.ll
+++ b/llvm/test/CodeGen/RISCV/frame-info.ll
@@ -19,14 +19,14 @@ define void @trivial() {
 ; RV32-WITHFP:       # %bb.0:
 ; RV32-WITHFP-NEXT:    addi sp, sp, -16
 ; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 16
-; RV32-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-WITHFP-NEXT:    .cfi_offset ra, -4
 ; RV32-WITHFP-NEXT:    .cfi_offset s0, -8
 ; RV32-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32-WITHFP-NEXT:    .cfi_def_cfa s0, 0
-; RV32-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32-WITHFP-NEXT:    ret
 ;
@@ -34,14 +34,14 @@ define void @trivial() {
 ; RV64-WITHFP:       # %bb.0:
 ; RV64-WITHFP-NEXT:    addi sp, sp, -16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 16
-; RV64-WITHFP-NEXT:    sd ra, 8(sp)
-; RV64-WITHFP-NEXT:    sd s0, 0(sp)
+; RV64-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-WITHFP-NEXT:    .cfi_offset ra, -8
 ; RV64-WITHFP-NEXT:    .cfi_offset s0, -16
 ; RV64-WITHFP-NEXT:    addi s0, sp, 16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa s0, 0
-; RV64-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64-WITHFP-NEXT:    ret
   ret void
@@ -52,8 +52,8 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp)
-; RV32-NEXT:    sw s0, 8(sp)
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 16
@@ -62,10 +62,10 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32-NEXT:    andi a0, a0, -16
 ; RV32-NEXT:    sub a0, sp, a0
 ; RV32-NEXT:    mv sp, a0
-; RV32-NEXT:    call callee_with_args
+; RV32-NEXT:    call callee_with_args@plt
 ; RV32-NEXT:    addi sp, s0, -16
-; RV32-NEXT:    lw s0, 8(sp)
-; RV32-NEXT:    lw ra, 12(sp)
+; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
@@ -73,8 +73,8 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    sd ra, 8(sp)
-; RV64-NEXT:    sd s0, 0(sp)
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 16
@@ -88,10 +88,10 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    sub a0, sp, a0
 ; RV64-NEXT:    mv sp, a0
-; RV64-NEXT:    call callee_with_args
+; RV64-NEXT:    call callee_with_args@plt
 ; RV64-NEXT:    addi sp, s0, -16
-; RV64-NEXT:    ld s0, 0(sp)
-; RV64-NEXT:    ld ra, 8(sp)
+; RV64-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ;
@@ -99,8 +99,8 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32-WITHFP:       # %bb.0: # %entry
 ; RV32-WITHFP-NEXT:    addi sp, sp, -16
 ; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 16
-; RV32-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-WITHFP-NEXT:    .cfi_offset ra, -4
 ; RV32-WITHFP-NEXT:    .cfi_offset s0, -8
 ; RV32-WITHFP-NEXT:    addi s0, sp, 16
@@ -109,10 +109,10 @@ define void @stack_alloc(i32 signext %size) {
 ; RV32-WITHFP-NEXT:    andi a0, a0, -16
 ; RV32-WITHFP-NEXT:    sub a0, sp, a0
 ; RV32-WITHFP-NEXT:    mv sp, a0
-; RV32-WITHFP-NEXT:    call callee_with_args
+; RV32-WITHFP-NEXT:    call callee_with_args@plt
 ; RV32-WITHFP-NEXT:    addi sp, s0, -16
-; RV32-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32-WITHFP-NEXT:    ret
 ;
@@ -120,8 +120,8 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-WITHFP:       # %bb.0: # %entry
 ; RV64-WITHFP-NEXT:    addi sp, sp, -16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 16
-; RV64-WITHFP-NEXT:    sd ra, 8(sp)
-; RV64-WITHFP-NEXT:    sd s0, 0(sp)
+; RV64-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-WITHFP-NEXT:    .cfi_offset ra, -8
 ; RV64-WITHFP-NEXT:    .cfi_offset s0, -16
 ; RV64-WITHFP-NEXT:    addi s0, sp, 16
@@ -135,10 +135,10 @@ define void @stack_alloc(i32 signext %size) {
 ; RV64-WITHFP-NEXT:    and a0, a0, a1
 ; RV64-WITHFP-NEXT:    sub a0, sp, a0
 ; RV64-WITHFP-NEXT:    mv sp, a0
-; RV64-WITHFP-NEXT:    call callee_with_args
+; RV64-WITHFP-NEXT:    call callee_with_args@plt
 ; RV64-WITHFP-NEXT:    addi sp, s0, -16
-; RV64-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64-WITHFP-NEXT:    ret
 entry:
@@ -152,17 +152,17 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp)
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    andi a0, a0, 1
 ; RV32-NEXT:    beqz a0, .LBB2_2
 ; RV32-NEXT:  # %bb.1: # %blue_pill
-; RV32-NEXT:    lw ra, 12(sp)
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    tail callee1
+; RV32-NEXT:    tail callee1@plt
 ; RV32-NEXT:  .LBB2_2: # %red_pill
-; RV32-NEXT:    call callee2
-; RV32-NEXT:    lw ra, 12(sp)
+; RV32-NEXT:    call callee2@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
@@ -170,17 +170,17 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    sd ra, 8(sp)
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    andi a0, a0, 1
 ; RV64-NEXT:    beqz a0, .LBB2_2
 ; RV64-NEXT:  # %bb.1: # %blue_pill
-; RV64-NEXT:    ld ra, 8(sp)
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    tail callee1
+; RV64-NEXT:    tail callee1@plt
 ; RV64-NEXT:  .LBB2_2: # %red_pill
-; RV64-NEXT:    call callee2
-; RV64-NEXT:    ld ra, 8(sp)
+; RV64-NEXT:    call callee2@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ;
@@ -188,8 +188,8 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32-WITHFP:       # %bb.0:
 ; RV32-WITHFP-NEXT:    addi sp, sp, -16
 ; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 16
-; RV32-WITHFP-NEXT:    sw ra, 12(sp)
-; RV32-WITHFP-NEXT:    sw s0, 8(sp)
+; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-WITHFP-NEXT:    .cfi_offset ra, -4
 ; RV32-WITHFP-NEXT:    .cfi_offset s0, -8
 ; RV32-WITHFP-NEXT:    addi s0, sp, 16
@@ -197,14 +197,14 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV32-WITHFP-NEXT:    andi a0, a0, 1
 ; RV32-WITHFP-NEXT:    beqz a0, .LBB2_2
 ; RV32-WITHFP-NEXT:  # %bb.1: # %blue_pill
-; RV32-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
-; RV32-WITHFP-NEXT:    tail callee1
+; RV32-WITHFP-NEXT:    tail callee1@plt
 ; RV32-WITHFP-NEXT:  .LBB2_2: # %red_pill
-; RV32-WITHFP-NEXT:    call callee2
-; RV32-WITHFP-NEXT:    lw s0, 8(sp)
-; RV32-WITHFP-NEXT:    lw ra, 12(sp)
+; RV32-WITHFP-NEXT:    call callee2@plt
+; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-WITHFP-NEXT:    addi sp, sp, 16
 ; RV32-WITHFP-NEXT:    ret
 ;
@@ -212,8 +212,8 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-WITHFP:       # %bb.0:
 ; RV64-WITHFP-NEXT:    addi sp, sp, -16
 ; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 16
-; RV64-WITHFP-NEXT:    sd ra, 8(sp)
-; RV64-WITHFP-NEXT:    sd s0, 0(sp)
+; RV64-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64-WITHFP-NEXT:    .cfi_offset ra, -8
 ; RV64-WITHFP-NEXT:    .cfi_offset s0, -16
 ; RV64-WITHFP-NEXT:    addi s0, sp, 16
@@ -221,14 +221,14 @@ define void @branch_and_tail_call(i1 %a) {
 ; RV64-WITHFP-NEXT:    andi a0, a0, 1
 ; RV64-WITHFP-NEXT:    beqz a0, .LBB2_2
 ; RV64-WITHFP-NEXT:  # %bb.1: # %blue_pill
-; RV64-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
-; RV64-WITHFP-NEXT:    tail callee1
+; RV64-WITHFP-NEXT:    tail callee1@plt
 ; RV64-WITHFP-NEXT:  .LBB2_2: # %red_pill
-; RV64-WITHFP-NEXT:    call callee2
-; RV64-WITHFP-NEXT:    ld s0, 0(sp)
-; RV64-WITHFP-NEXT:    ld ra, 8(sp)
+; RV64-WITHFP-NEXT:    call callee2@plt
+; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-WITHFP-NEXT:    addi sp, sp, 16
 ; RV64-WITHFP-NEXT:    ret
   br i1 %a, label %blue_pill, label %red_pill

diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll
index 6f6be73e7080..4ed4e118d8d1 100644
--- a/llvm/test/CodeGen/RISCV/frame.ll
+++ b/llvm/test/CodeGen/RISCV/frame.ll
@@ -10,24 +10,24 @@ define i32 @test() nounwind {
 ; RV32I-FPELIM-LABEL: test:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -32
-; RV32I-FPELIM-NEXT:    sw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    sw zero, 24(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 20(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 16(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 12(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 8(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 12
-; RV32I-FPELIM-NEXT:    call test1
+; RV32I-FPELIM-NEXT:    call test1@plt
 ; RV32I-FPELIM-NEXT:    mv a0, zero
-; RV32I-FPELIM-NEXT:    lw ra, 28(sp)
+; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: test:
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -32
-; RV32I-WITHFP-NEXT:    sw ra, 28(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 24(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV32I-WITHFP-NEXT:    sw zero, -16(s0)
 ; RV32I-WITHFP-NEXT:    sw zero, -20(s0)
@@ -35,10 +35,10 @@ define i32 @test() nounwind {
 ; RV32I-WITHFP-NEXT:    sw zero, -28(s0)
 ; RV32I-WITHFP-NEXT:    sw zero, -32(s0)
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -28
-; RV32I-WITHFP-NEXT:    call test1
+; RV32I-WITHFP-NEXT:    call test1@plt
 ; RV32I-WITHFP-NEXT:    mv a0, zero
-; RV32I-WITHFP-NEXT:    lw s0, 24(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 28(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 32
 ; RV32I-WITHFP-NEXT:    ret
   %key = alloca %struct.key_t, align 4

diff --git a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
index 255bf7155392..369cf7be0a77 100644
--- a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
+++ b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
@@ -12,24 +12,24 @@ define i8* @test_frameaddress_0() nounwind {
 ; RV32I-LABEL: test_frameaddress_0:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 16
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_frameaddress_0:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 16
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    ld s0, 0(sp)
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call i8* @llvm.frameaddress(i32 0)
@@ -40,26 +40,26 @@ define i8* @test_frameaddress_2() nounwind {
 ; RV32I-LABEL: test_frameaddress_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 16
 ; RV32I-NEXT:    lw a0, -8(s0)
 ; RV32I-NEXT:    lw a0, -8(a0)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_frameaddress_2:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 16
 ; RV64I-NEXT:    ld a0, -16(s0)
 ; RV64I-NEXT:    ld a0, -16(a0)
-; RV64I-NEXT:    ld s0, 0(sp)
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call i8* @llvm.frameaddress(i32 2)
@@ -70,32 +70,32 @@ define i8* @test_frameaddress_3_alloca() nounwind {
 ; RV32I-LABEL: test_frameaddress_3_alloca:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -112
-; RV32I-NEXT:    sw ra, 108(sp)
-; RV32I-NEXT:    sw s0, 104(sp)
+; RV32I-NEXT:    sw ra, 108(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 104(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 112
 ; RV32I-NEXT:    addi a0, s0, -108
-; RV32I-NEXT:    call notdead
+; RV32I-NEXT:    call notdead@plt
 ; RV32I-NEXT:    lw a0, -8(s0)
 ; RV32I-NEXT:    lw a0, -8(a0)
 ; RV32I-NEXT:    lw a0, -8(a0)
-; RV32I-NEXT:    lw s0, 104(sp)
-; RV32I-NEXT:    lw ra, 108(sp)
+; RV32I-NEXT:    lw s0, 104(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 108(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 112
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_frameaddress_3_alloca:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -128
-; RV64I-NEXT:    sd ra, 120(sp)
-; RV64I-NEXT:    sd s0, 112(sp)
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 128
 ; RV64I-NEXT:    addi a0, s0, -116
-; RV64I-NEXT:    call notdead
+; RV64I-NEXT:    call notdead@plt
 ; RV64I-NEXT:    ld a0, -16(s0)
 ; RV64I-NEXT:    ld a0, -16(a0)
 ; RV64I-NEXT:    ld a0, -16(a0)
-; RV64I-NEXT:    ld s0, 112(sp)
-; RV64I-NEXT:    ld ra, 120(sp)
+; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    ret
   %1 = alloca [100 x i8]
@@ -123,28 +123,28 @@ define i8* @test_returnaddress_2() nounwind {
 ; RV32I-LABEL: test_returnaddress_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 16
 ; RV32I-NEXT:    lw a0, -8(s0)
 ; RV32I-NEXT:    lw a0, -8(a0)
 ; RV32I-NEXT:    lw a0, -4(a0)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_returnaddress_2:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 16
 ; RV64I-NEXT:    ld a0, -16(s0)
 ; RV64I-NEXT:    ld a0, -16(a0)
 ; RV64I-NEXT:    ld a0, -8(a0)
-; RV64I-NEXT:    ld s0, 0(sp)
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call i8* @llvm.returnaddress(i32 2)

diff --git a/llvm/test/CodeGen/RISCV/ghccc-rv32.ll b/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
index 1d7b05ba3b9a..278dd0e2897a 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
@@ -78,7 +78,7 @@ define ghccc void @foo() nounwind {
 ; CHECK-NEXT:    lw s2, %lo(sp)(a0)
 ; CHECK-NEXT:    lui a0, %hi(base)
 ; CHECK-NEXT:    lw s1, %lo(base)(a0)
-; CHECK-NEXT:    tail bar
+; CHECK-NEXT:    tail bar@plt
 entry:
   %0  = load double, double* @d6
   %1  = load double, double* @d5

diff --git a/llvm/test/CodeGen/RISCV/ghccc-rv64.ll b/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
index aaca2e102a79..7b206fa886cd 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
@@ -78,7 +78,7 @@ define ghccc void @foo() nounwind {
 ; CHECK-NEXT:    ld s2, %lo(sp)(a0)
 ; CHECK-NEXT:    lui a0, %hi(base)
 ; CHECK-NEXT:    ld s1, %lo(base)(a0)
-; CHECK-NEXT:    tail bar
+; CHECK-NEXT:    tail bar@plt
 entry:
   %0  = load double, double* @d6
   %1  = load double, double* @d5

diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
index 0f192a2eabce..5fddeea99725 100644
--- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
@@ -82,13 +82,13 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
 ; RV32I-LABEL: fcopysign_fneg:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    call __gnu_h2f_ieee
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    not a1, s0
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    addi a2, a2, -1
@@ -97,9 +97,9 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
 ; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    call __gnu_f2h_ieee
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __gnu_f2h_ieee@plt
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -111,13 +111,13 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
 ; RV64I-LABEL: fcopysign_fneg:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    call __gnu_h2f_ieee
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    not a1, s0
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    addiw a2, a2, -1
@@ -129,9 +129,9 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    call __gnu_f2h_ieee
-; RV64I-NEXT:    ld s0, 0(sp)
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __gnu_f2h_ieee@plt
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
index 0566928796f0..c03156771d66 100644
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -12,28 +12,28 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_false:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    addi a0, zero, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB0_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.then
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB0_2: # %if.else
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_false:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    addi a0, zero, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB0_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.then
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB0_2: # %if.else
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp false half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -47,28 +47,28 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_oeq:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    bnez a0, .LBB1_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB1_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_oeq:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    bnez a0, .LBB1_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB1_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp oeq half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -85,30 +85,30 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_oeq_alt:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    xori a0, a0, 1
 ; RV32IZFH-NEXT:    beqz a0, .LBB2_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB2_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_oeq_alt:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    xori a0, a0, 1
 ; RV64IZFH-NEXT:    beqz a0, .LBB2_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB2_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp oeq half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -122,28 +122,28 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_ogt:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
 ; RV32IZFH-NEXT:    bnez a0, .LBB3_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB3_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_ogt:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
 ; RV64IZFH-NEXT:    bnez a0, .LBB3_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB3_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp ogt half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -157,28 +157,28 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_oge:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
 ; RV32IZFH-NEXT:    bnez a0, .LBB4_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB4_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_oge:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
 ; RV64IZFH-NEXT:    bnez a0, .LBB4_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB4_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp oge half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -192,28 +192,28 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_olt:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    bnez a0, .LBB5_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB5_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_olt:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    bnez a0, .LBB5_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB5_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp olt half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -227,28 +227,28 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_ole:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    bnez a0, .LBB6_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB6_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_ole:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    bnez a0, .LBB6_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB6_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp ole half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -263,7 +263,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_one:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV32IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV32IZFH-NEXT:    and a0, a1, a0
@@ -272,16 +272,16 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    and a0, a1, a0
 ; RV32IZFH-NEXT:    bnez a0, .LBB7_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB7_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_one:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IZFH-NEXT:    and a0, a1, a0
@@ -290,11 +290,11 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    and a0, a1, a0
 ; RV64IZFH-NEXT:    bnez a0, .LBB7_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB7_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp one half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -308,32 +308,32 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_ord:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV32IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV32IZFH-NEXT:    and a0, a1, a0
 ; RV32IZFH-NEXT:    bnez a0, .LBB8_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB8_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_ord:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IZFH-NEXT:    and a0, a1, a0
 ; RV64IZFH-NEXT:    bnez a0, .LBB8_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB8_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp ord half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -347,7 +347,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_ueq:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    feq.h a1, fa1, fa1
 ; RV32IZFH-NEXT:    feq.h a2, fa0, fa0
@@ -356,16 +356,16 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    or a0, a0, a1
 ; RV32IZFH-NEXT:    bnez a0, .LBB9_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB9_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_ueq:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    feq.h a1, fa1, fa1
 ; RV64IZFH-NEXT:    feq.h a2, fa0, fa0
@@ -374,11 +374,11 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    or a0, a0, a1
 ; RV64IZFH-NEXT:    bnez a0, .LBB9_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB9_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp ueq half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -392,30 +392,30 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_ugt:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    xori a0, a0, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB10_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB10_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_ugt:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    xori a0, a0, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB10_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB10_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp ugt half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -429,30 +429,30 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_uge:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    xori a0, a0, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB11_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB11_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_uge:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    xori a0, a0, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB11_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB11_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp uge half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -466,30 +466,30 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_ult:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
 ; RV32IZFH-NEXT:    xori a0, a0, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB12_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB12_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_ult:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
 ; RV64IZFH-NEXT:    xori a0, a0, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB12_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB12_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp ult half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -503,30 +503,30 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_ule:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
 ; RV32IZFH-NEXT:    xori a0, a0, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB13_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB13_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_ule:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
 ; RV64IZFH-NEXT:    xori a0, a0, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB13_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB13_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp ule half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -540,30 +540,30 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_une:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    xori a0, a0, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB14_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB14_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_une:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    xori a0, a0, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB14_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB14_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp une half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -578,34 +578,34 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_uno:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV32IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV32IZFH-NEXT:    and a0, a1, a0
 ; RV32IZFH-NEXT:    seqz a0, a0
 ; RV32IZFH-NEXT:    bnez a0, .LBB15_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB15_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_uno:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IZFH-NEXT:    and a0, a1, a0
 ; RV64IZFH-NEXT:    seqz a0, a0
 ; RV64IZFH-NEXT:    bnez a0, .LBB15_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB15_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp uno half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -619,28 +619,28 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_true:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    addi a0, zero, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB16_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB16_2: # %if.then
-; RV32IZFH-NEXT:    call abort
+; RV32IZFH-NEXT:    call abort@plt
 ;
 ; RV64IZFH-LABEL: br_fcmp_true:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    addi a0, zero, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB16_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB16_2: # %if.then
-; RV64IZFH-NEXT:    call abort
+; RV64IZFH-NEXT:    call abort@plt
   %1 = fcmp true half %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:

diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index a9299f2d7ea6..7f4bd333a506 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -108,18 +108,18 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_l_h:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
-; RV32IZFH-NEXT:    call __fixhfdi
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_l_h:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
-; RV32IDZFH-NEXT:    sw ra, 12(sp)
-; RV32IDZFH-NEXT:    call __fixhfdi
-; RV32IDZFH-NEXT:    lw ra, 12(sp)
+; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IDZFH-NEXT:    call __fixhfdi@plt
+; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
 ;
@@ -140,18 +140,18 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_lu_h:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
-; RV32IZFH-NEXT:    call __fixunshfdi
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_lu_h:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
-; RV32IDZFH-NEXT:    sw ra, 12(sp)
-; RV32IDZFH-NEXT:    call __fixunshfdi
-; RV32IDZFH-NEXT:    lw ra, 12(sp)
+; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IDZFH-NEXT:    call __fixunshfdi@plt
+; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
 ;
@@ -288,18 +288,18 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_h_l:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
-; RV32IZFH-NEXT:    call __floatdihf
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    call __floatdihf@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_l:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
-; RV32IDZFH-NEXT:    sw ra, 12(sp)
-; RV32IDZFH-NEXT:    call __floatdihf
-; RV32IDZFH-NEXT:    lw ra, 12(sp)
+; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IDZFH-NEXT:    call __floatdihf@plt
+; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
 ;
@@ -320,18 +320,18 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_h_lu:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
-; RV32IZFH-NEXT:    call __floatundihf
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    call __floatundihf@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_lu:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi sp, sp, -16
-; RV32IDZFH-NEXT:    sw ra, 12(sp)
-; RV32IDZFH-NEXT:    call __floatundihf
-; RV32IDZFH-NEXT:    lw ra, 12(sp)
+; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IDZFH-NEXT:    call __floatundihf@plt
+; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    addi sp, sp, 16
 ; RV32IDZFH-NEXT:    ret
 ;
@@ -400,9 +400,9 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_h_d:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
-; RV32IZFH-NEXT:    call __truncdfhf2
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    call __truncdfhf2@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
@@ -414,9 +414,9 @@ define half @fcvt_h_d(double %a) nounwind {
 ; RV64IZFH-LABEL: fcvt_h_d:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
-; RV64IZFH-NEXT:    call __truncdfhf2
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __truncdfhf2@plt
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ;
@@ -432,10 +432,10 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_d_h:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    call __extendsfdf2
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    call __extendsfdf2@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
@@ -447,10 +447,10 @@ define double @fcvt_d_h(half %a) nounwind {
 ; RV64IZFH-LABEL: fcvt_d_h:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT:    call __extendsfdf2
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    call __extendsfdf2@plt
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index ded8b1803af0..dbef64c88c20 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -118,30 +118,30 @@ define half @flh_stack(half %a) nounwind {
 ; RV32IZFH-LABEL: flh_stack:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
-; RV32IZFH-NEXT:    fsw fs0, 8(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fmv.h fs0, fa0
 ; RV32IZFH-NEXT:    addi a0, sp, 4
-; RV32IZFH-NEXT:    call notdead
+; RV32IZFH-NEXT:    call notdead@plt
 ; RV32IZFH-NEXT:    flh ft0, 4(sp)
 ; RV32IZFH-NEXT:    fadd.h fa0, ft0, fs0
-; RV32IZFH-NEXT:    flw fs0, 8(sp)
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: flh_stack:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
-; RV64IZFH-NEXT:    fsw fs0, 4(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV64IZFH-NEXT:    fmv.h fs0, fa0
 ; RV64IZFH-NEXT:    mv a0, sp
-; RV64IZFH-NEXT:    call notdead
+; RV64IZFH-NEXT:    call notdead@plt
 ; RV64IZFH-NEXT:    flh ft0, 0(sp)
 ; RV64IZFH-NEXT:    fadd.h fa0, ft0, fs0
-; RV64IZFH-NEXT:    flw fs0, 4(sp)
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
   %1 = alloca half, align 4
@@ -156,24 +156,24 @@ define void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: fsh_stack:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp)
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fadd.h ft0, fa0, fa1
 ; RV32IZFH-NEXT:    fsh ft0, 8(sp)
 ; RV32IZFH-NEXT:    addi a0, sp, 8
-; RV32IZFH-NEXT:    call notdead
-; RV32IZFH-NEXT:    lw ra, 12(sp)
+; RV32IZFH-NEXT:    call notdead@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fsh_stack:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
-; RV64IZFH-NEXT:    sd ra, 8(sp)
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fadd.h ft0, fa0, fa1
 ; RV64IZFH-NEXT:    fsh ft0, 4(sp)
 ; RV64IZFH-NEXT:    addi a0, sp, 4
-; RV64IZFH-NEXT:    call notdead
-; RV64IZFH-NEXT:    ld ra, 8(sp)
+; RV64IZFH-NEXT:    call notdead@plt
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
 ; RV64IZFH-NEXT:    ret
   %1 = fadd half %a, %b ; force store from FPR16

diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
index df8b31170695..38cac4d77066 100644
--- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
+++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
@@ -128,18 +128,18 @@ define dso_local i32 @load_half() nounwind {
 ; CHECK-LABEL: load_half:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw ra, 12(sp)
+; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lui a0, %hi(foo+8)
 ; CHECK-NEXT:    lhu a0, %lo(foo+8)(a0)
 ; CHECK-NEXT:    addi a1, zero, 140
 ; CHECK-NEXT:    bne a0, a1, .LBB7_2
 ; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    mv a0, zero
-; CHECK-NEXT:    lw ra, 12(sp)
+; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB7_2: # %if.then
-; CHECK-NEXT:    call abort
+; CHECK-NEXT:    call abort@plt
 entry:
   %0 = load i16, i16* getelementptr inbounds ([6 x i16], [6 x i16]* @foo, i32 0, i32 4), align 2
   %cmp = icmp eq i16 %0, 140

diff --git a/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll b/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
index f9ed4aed6ca3..8bfce389497b 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
@@ -59,24 +59,24 @@ define i32 @explicit_register_x1(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv ra, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, ra, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x1:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv ra, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, ra, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x1}"(i32 %a)
@@ -88,24 +88,24 @@ define i32 @explicit_register_ra(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_ra:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv ra, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, ra, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_ra:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv ra, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, ra, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{ra}"(i32 %a)
@@ -157,24 +157,24 @@ define i32 @explicit_register_x3(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x3:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw gp, 12(sp)
+; RV32I-NEXT:    sw gp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv gp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, gp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw gp, 12(sp)
+; RV32I-NEXT:    lw gp, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x3:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd gp, 8(sp)
+; RV64I-NEXT:    sd gp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv gp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, gp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld gp, 8(sp)
+; RV64I-NEXT:    ld gp, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x3}"(i32 %a)
@@ -186,24 +186,24 @@ define i32 @explicit_register_gp(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_gp:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw gp, 12(sp)
+; RV32I-NEXT:    sw gp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv gp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, gp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw gp, 12(sp)
+; RV32I-NEXT:    lw gp, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_gp:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd gp, 8(sp)
+; RV64I-NEXT:    sd gp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv gp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, gp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld gp, 8(sp)
+; RV64I-NEXT:    ld gp, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{gp}"(i32 %a)
@@ -215,24 +215,24 @@ define i32 @explicit_register_x4(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x4:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw tp, 12(sp)
+; RV32I-NEXT:    sw tp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv tp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, tp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw tp, 12(sp)
+; RV32I-NEXT:    lw tp, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x4:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd tp, 8(sp)
+; RV64I-NEXT:    sd tp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv tp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, tp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld tp, 8(sp)
+; RV64I-NEXT:    ld tp, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x4}"(i32 %a)
@@ -244,24 +244,24 @@ define i32 @explicit_register_tp(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_tp:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw tp, 12(sp)
+; RV32I-NEXT:    sw tp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv tp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, tp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw tp, 12(sp)
+; RV32I-NEXT:    lw tp, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_tp:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd tp, 8(sp)
+; RV64I-NEXT:    sd tp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv tp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, tp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld tp, 8(sp)
+; RV64I-NEXT:    ld tp, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{tp}"(i32 %a)
@@ -393,24 +393,24 @@ define i32 @explicit_register_x8(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s0, 12(sp)
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s0, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s0, 12(sp)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s0, 8(sp)
+; RV64I-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s0, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s0, 8(sp)
+; RV64I-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x8}"(i32 %a)
@@ -422,24 +422,24 @@ define i32 @explicit_register_s0(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s0:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s0, 12(sp)
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s0, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s0, 12(sp)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s0:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s0, 8(sp)
+; RV64I-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s0, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s0, 8(sp)
+; RV64I-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s0}"(i32 %a)
@@ -451,24 +451,24 @@ define i32 @explicit_register_fp(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_fp:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s0, 12(sp)
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s0, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s0, 12(sp)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_fp:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s0, 8(sp)
+; RV64I-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s0, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s0, 8(sp)
+; RV64I-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{fp}"(i32 %a)
@@ -480,24 +480,24 @@ define i32 @explicit_register_x9(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x9:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s1, 12(sp)
+; RV32I-NEXT:    sw s1, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s1, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s1, 12(sp)
+; RV32I-NEXT:    lw s1, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x9:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s1, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s1, 8(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x9}"(i32 %a)
@@ -509,24 +509,24 @@ define i32 @explicit_register_s1(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s1, 12(sp)
+; RV32I-NEXT:    sw s1, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s1, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s1, 12(sp)
+; RV32I-NEXT:    lw s1, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s1:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s1, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s1, 8(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s1}"(i32 %a)
@@ -854,24 +854,24 @@ define i32 @explicit_register_x18(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x18:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s2, 12(sp)
+; RV32I-NEXT:    sw s2, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s2, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s2, 12(sp)
+; RV32I-NEXT:    lw s2, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x18:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s2, 8(sp)
+; RV64I-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s2, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s2, 8(sp)
+; RV64I-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x18}"(i32 %a)
@@ -883,24 +883,24 @@ define i32 @explicit_register_s2(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s2, 12(sp)
+; RV32I-NEXT:    sw s2, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s2, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s2, 12(sp)
+; RV32I-NEXT:    lw s2, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s2:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s2, 8(sp)
+; RV64I-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s2, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s2, 8(sp)
+; RV64I-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s2}"(i32 %a)
@@ -912,24 +912,24 @@ define i32 @explicit_register_x19(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x19:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s3, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s3, 12(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x19:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s3, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s3, 8(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x19}"(i32 %a)
@@ -941,24 +941,24 @@ define i32 @explicit_register_s3(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s3:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s3, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s3, 12(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s3:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s3, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s3, 8(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s3}"(i32 %a)
@@ -970,24 +970,24 @@ define i32 @explicit_register_x20(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x20:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s4, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s4, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s4, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x20:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    sd s4, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s4, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s4, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x20}"(i32 %a)
@@ -999,24 +999,24 @@ define i32 @explicit_register_s4(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s4:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s4, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s4, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s4, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s4:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    sd s4, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s4, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s4, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s4}"(i32 %a)
@@ -1028,24 +1028,24 @@ define i32 @explicit_register_x21(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x21:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s5, 12(sp)
+; RV32I-NEXT:    sw s5, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s5, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s5, 12(sp)
+; RV32I-NEXT:    lw s5, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x21:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s5, 8(sp)
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s5, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s5, 8(sp)
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x21}"(i32 %a)
@@ -1057,24 +1057,24 @@ define i32 @explicit_register_s5(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s5:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s5, 12(sp)
+; RV32I-NEXT:    sw s5, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s5, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s5, 12(sp)
+; RV32I-NEXT:    lw s5, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s5:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s5, 8(sp)
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s5, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s5, 8(sp)
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s5}"(i32 %a)
@@ -1086,24 +1086,24 @@ define i32 @explicit_register_x22(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x22:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s6, 12(sp)
+; RV32I-NEXT:    sw s6, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s6, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s6, 12(sp)
+; RV32I-NEXT:    lw s6, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x22:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    sd s6, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s6, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s6, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s6, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x22}"(i32 %a)
@@ -1115,24 +1115,24 @@ define i32 @explicit_register_s6(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s6:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s6, 12(sp)
+; RV32I-NEXT:    sw s6, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s6, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s6, 12(sp)
+; RV32I-NEXT:    lw s6, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s6:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    sd s6, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s6, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s6, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s6, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s6}"(i32 %a)
@@ -1144,24 +1144,24 @@ define i32 @explicit_register_x23(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x23:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s7, 12(sp)
+; RV32I-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s7, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s7, 12(sp)
+; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x23:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s7, 8(sp)
+; RV64I-NEXT:    sd s7, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s7, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s7, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s7, 8(sp)
+; RV64I-NEXT:    ld s7, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x23}"(i32 %a)
@@ -1173,24 +1173,24 @@ define i32 @explicit_register_s7(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s7:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s7, 12(sp)
+; RV32I-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s7, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s7, 12(sp)
+; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s7:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s7, 8(sp)
+; RV64I-NEXT:    sd s7, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s7, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s7, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s7, 8(sp)
+; RV64I-NEXT:    ld s7, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s7}"(i32 %a)
@@ -1202,24 +1202,24 @@ define i32 @explicit_register_x24(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x24:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s8, 12(sp)
+; RV32I-NEXT:    sw s8, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s8, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s8, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s8, 12(sp)
+; RV32I-NEXT:    lw s8, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x24:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s8, 8(sp)
+; RV64I-NEXT:    sd s8, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s8, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s8, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s8, 8(sp)
+; RV64I-NEXT:    ld s8, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x24}"(i32 %a)
@@ -1231,24 +1231,24 @@ define i32 @explicit_register_s8(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s8, 12(sp)
+; RV32I-NEXT:    sw s8, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s8, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s8, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s8, 12(sp)
+; RV32I-NEXT:    lw s8, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s8, 8(sp)
+; RV64I-NEXT:    sd s8, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s8, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s8, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s8, 8(sp)
+; RV64I-NEXT:    ld s8, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s8}"(i32 %a)
@@ -1260,24 +1260,24 @@ define i32 @explicit_register_x25(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x25:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s9, 12(sp)
+; RV32I-NEXT:    sw s9, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s9, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s9, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s9, 12(sp)
+; RV32I-NEXT:    lw s9, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x25:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s9, 8(sp)
+; RV64I-NEXT:    sd s9, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s9, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s9, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s9, 8(sp)
+; RV64I-NEXT:    ld s9, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x25}"(i32 %a)
@@ -1289,24 +1289,24 @@ define i32 @explicit_register_s9(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s9:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s9, 12(sp)
+; RV32I-NEXT:    sw s9, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s9, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s9, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s9, 12(sp)
+; RV32I-NEXT:    lw s9, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s9:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s9, 8(sp)
+; RV64I-NEXT:    sd s9, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s9, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s9, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s9, 8(sp)
+; RV64I-NEXT:    ld s9, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s9}"(i32 %a)
@@ -1318,24 +1318,24 @@ define i32 @explicit_register_x26(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x26:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s10, 12(sp)
+; RV32I-NEXT:    sw s10, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s10, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s10, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s10, 12(sp)
+; RV32I-NEXT:    lw s10, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x26:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s10, 8(sp)
+; RV64I-NEXT:    sd s10, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s10, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s10, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s10, 8(sp)
+; RV64I-NEXT:    ld s10, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x26}"(i32 %a)
@@ -1347,24 +1347,24 @@ define i32 @explicit_register_s10(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s10:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s10, 12(sp)
+; RV32I-NEXT:    sw s10, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s10, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s10, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s10, 12(sp)
+; RV32I-NEXT:    lw s10, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s10:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s10, 8(sp)
+; RV64I-NEXT:    sd s10, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s10, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s10, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s10, 8(sp)
+; RV64I-NEXT:    ld s10, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s10}"(i32 %a)
@@ -1376,24 +1376,24 @@ define i32 @explicit_register_x27(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x27:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s11, 12(sp)
+; RV32I-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s11, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s11, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s11, 12(sp)
+; RV32I-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x27:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s11, 8(sp)
+; RV64I-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s11, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s11, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s11, 8(sp)
+; RV64I-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x27}"(i32 %a)
@@ -1405,24 +1405,24 @@ define i32 @explicit_register_s11(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_s11:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s11, 12(sp)
+; RV32I-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s11, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, s11, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw s11, 12(sp)
+; RV32I-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_s11:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd s11, 8(sp)
+; RV64I-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s11, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, s11, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld s11, 8(sp)
+; RV64I-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{s11}"(i32 %a)

diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-abi-names.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-abi-names.ll
index bdce94828a16..20d3de4230f7 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-d-abi-names.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-abi-names.ll
@@ -341,24 +341,24 @@ define i32 @explicit_register_f8(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f8:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs0, 8(sp)
+; RV32IFD-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs0
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs0, 8(sp)
+; RV32IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f8:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs0, 8(sp)
+; RV64IFD-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs0
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs0, 8(sp)
+; RV64IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f8}"(double %a)
@@ -370,24 +370,24 @@ define i32 @explicit_register_fs0(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs0:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs0, 8(sp)
+; RV32IFD-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs0
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs0, 8(sp)
+; RV32IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs0:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs0, 8(sp)
+; RV64IFD-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs0
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs0, 8(sp)
+; RV64IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs0}"(double %a)
@@ -399,24 +399,24 @@ define i32 @explicit_register_f9(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f9:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs1, 8(sp)
+; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs1, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs1
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs1, 8(sp)
+; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f9:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs1, 8(sp)
+; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs1, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs1
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs1, 8(sp)
+; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f9}"(double %a)
@@ -428,24 +428,24 @@ define i32 @explicit_register_fs1(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs1:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs1, 8(sp)
+; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs1, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs1
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs1, 8(sp)
+; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs1:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs1, 8(sp)
+; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs1, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs1
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs1, 8(sp)
+; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs1}"(double %a)
@@ -773,24 +773,24 @@ define i32 @explicit_register_f18(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f18:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs2, 8(sp)
+; RV32IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs2, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs2
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs2, 8(sp)
+; RV32IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f18:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs2, 8(sp)
+; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs2, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs2
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs2, 8(sp)
+; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f18}"(double %a)
@@ -802,24 +802,24 @@ define i32 @explicit_register_fs2(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs2:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs2, 8(sp)
+; RV32IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs2, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs2
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs2, 8(sp)
+; RV32IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs2:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs2, 8(sp)
+; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs2, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs2
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs2, 8(sp)
+; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs2}"(double %a)
@@ -831,24 +831,24 @@ define i32 @explicit_register_f19(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f19:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs3, 8(sp)
+; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs3, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs3
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs3, 8(sp)
+; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f19:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs3, 8(sp)
+; RV64IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs3, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs3
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs3, 8(sp)
+; RV64IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f19}"(double %a)
@@ -860,24 +860,24 @@ define i32 @explicit_register_fs3(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs3:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs3, 8(sp)
+; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs3, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs3
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs3, 8(sp)
+; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs3:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs3, 8(sp)
+; RV64IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs3, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs3
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs3, 8(sp)
+; RV64IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs3}"(double %a)
@@ -889,24 +889,24 @@ define i32 @explicit_register_f20(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f20:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs4, 8(sp)
+; RV32IFD-NEXT:    fsd fs4, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs4, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs4
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs4, 8(sp)
+; RV32IFD-NEXT:    fld fs4, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f20:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs4, 8(sp)
+; RV64IFD-NEXT:    fsd fs4, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs4, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs4
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs4, 8(sp)
+; RV64IFD-NEXT:    fld fs4, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f20}"(double %a)
@@ -918,24 +918,24 @@ define i32 @explicit_register_fs4(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs4:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs4, 8(sp)
+; RV32IFD-NEXT:    fsd fs4, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs4, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs4
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs4, 8(sp)
+; RV32IFD-NEXT:    fld fs4, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs4:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs4, 8(sp)
+; RV64IFD-NEXT:    fsd fs4, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs4, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs4
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs4, 8(sp)
+; RV64IFD-NEXT:    fld fs4, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs4}"(double %a)
@@ -947,24 +947,24 @@ define i32 @explicit_register_f21(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f21:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs5, 8(sp)
+; RV32IFD-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs5, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs5
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs5, 8(sp)
+; RV32IFD-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f21:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs5, 8(sp)
+; RV64IFD-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs5, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs5
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs5, 8(sp)
+; RV64IFD-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f21}"(double %a)
@@ -976,24 +976,24 @@ define i32 @explicit_register_fs5(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs5:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs5, 8(sp)
+; RV32IFD-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs5, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs5
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs5, 8(sp)
+; RV32IFD-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs5:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs5, 8(sp)
+; RV64IFD-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs5, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs5
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs5, 8(sp)
+; RV64IFD-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs5}"(double %a)
@@ -1005,24 +1005,24 @@ define i32 @explicit_register_f22(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f22:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs6, 8(sp)
+; RV32IFD-NEXT:    fsd fs6, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs6, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs6
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs6, 8(sp)
+; RV32IFD-NEXT:    fld fs6, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f22:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs6, 8(sp)
+; RV64IFD-NEXT:    fsd fs6, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs6, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs6
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs6, 8(sp)
+; RV64IFD-NEXT:    fld fs6, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f22}"(double %a)
@@ -1034,24 +1034,24 @@ define i32 @explicit_register_fs6(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs6:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs6, 8(sp)
+; RV32IFD-NEXT:    fsd fs6, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs6, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs6
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs6, 8(sp)
+; RV32IFD-NEXT:    fld fs6, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs6:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs6, 8(sp)
+; RV64IFD-NEXT:    fsd fs6, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs6, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs6
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs6, 8(sp)
+; RV64IFD-NEXT:    fld fs6, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs6}"(double %a)
@@ -1063,24 +1063,24 @@ define i32 @explicit_register_f23(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f23:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs7, 8(sp)
+; RV32IFD-NEXT:    fsd fs7, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs7, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs7
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs7, 8(sp)
+; RV32IFD-NEXT:    fld fs7, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f23:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs7, 8(sp)
+; RV64IFD-NEXT:    fsd fs7, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs7, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs7
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs7, 8(sp)
+; RV64IFD-NEXT:    fld fs7, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f23}"(double %a)
@@ -1092,24 +1092,24 @@ define i32 @explicit_register_fs7(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs7:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs7, 8(sp)
+; RV32IFD-NEXT:    fsd fs7, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs7, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs7
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs7, 8(sp)
+; RV32IFD-NEXT:    fld fs7, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs7:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs7, 8(sp)
+; RV64IFD-NEXT:    fsd fs7, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs7, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs7
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs7, 8(sp)
+; RV64IFD-NEXT:    fld fs7, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs7}"(double %a)
@@ -1121,24 +1121,24 @@ define i32 @explicit_register_f24(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f24:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs8, 8(sp)
+; RV32IFD-NEXT:    fsd fs8, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs8, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs8
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs8, 8(sp)
+; RV32IFD-NEXT:    fld fs8, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f24:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs8, 8(sp)
+; RV64IFD-NEXT:    fsd fs8, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs8, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs8
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs8, 8(sp)
+; RV64IFD-NEXT:    fld fs8, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f24}"(double %a)
@@ -1150,24 +1150,24 @@ define i32 @explicit_register_fs8(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs8:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs8, 8(sp)
+; RV32IFD-NEXT:    fsd fs8, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs8, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs8
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs8, 8(sp)
+; RV32IFD-NEXT:    fld fs8, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs8:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs8, 8(sp)
+; RV64IFD-NEXT:    fsd fs8, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs8, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs8
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs8, 8(sp)
+; RV64IFD-NEXT:    fld fs8, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs8}"(double %a)
@@ -1179,24 +1179,24 @@ define i32 @explicit_register_f25(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f25:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs9, 8(sp)
+; RV32IFD-NEXT:    fsd fs9, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs9, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs9
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs9, 8(sp)
+; RV32IFD-NEXT:    fld fs9, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f25:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs9, 8(sp)
+; RV64IFD-NEXT:    fsd fs9, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs9, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs9
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs9, 8(sp)
+; RV64IFD-NEXT:    fld fs9, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f25}"(double %a)
@@ -1208,24 +1208,24 @@ define i32 @explicit_register_fs9(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs9:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs9, 8(sp)
+; RV32IFD-NEXT:    fsd fs9, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs9, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs9
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs9, 8(sp)
+; RV32IFD-NEXT:    fld fs9, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs9:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs9, 8(sp)
+; RV64IFD-NEXT:    fsd fs9, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs9, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs9
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs9, 8(sp)
+; RV64IFD-NEXT:    fld fs9, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs9}"(double %a)
@@ -1237,24 +1237,24 @@ define i32 @explicit_register_f26(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f26:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs10, 8(sp)
+; RV32IFD-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs10, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs10
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs10, 8(sp)
+; RV32IFD-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f26:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs10, 8(sp)
+; RV64IFD-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs10, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs10
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs10, 8(sp)
+; RV64IFD-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f26}"(double %a)
@@ -1266,24 +1266,24 @@ define i32 @explicit_register_fs10(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs10:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs10, 8(sp)
+; RV32IFD-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs10, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs10
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs10, 8(sp)
+; RV32IFD-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs10:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs10, 8(sp)
+; RV64IFD-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs10, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs10
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs10, 8(sp)
+; RV64IFD-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs10}"(double %a)
@@ -1295,24 +1295,24 @@ define i32 @explicit_register_f27(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_f27:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs11, 8(sp)
+; RV32IFD-NEXT:    fsd fs11, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs11, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs11
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs11, 8(sp)
+; RV32IFD-NEXT:    fld fs11, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_f27:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs11, 8(sp)
+; RV64IFD-NEXT:    fsd fs11, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs11, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs11
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs11, 8(sp)
+; RV64IFD-NEXT:    fld fs11, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{f27}"(double %a)
@@ -1324,24 +1324,24 @@ define i32 @explicit_register_fs11(double %a) nounwind {
 ; RV32IFD-LABEL: explicit_register_fs11:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fsd fs11, 8(sp)
+; RV32IFD-NEXT:    fsd fs11, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs11, fa0
 ; RV32IFD-NEXT:    #APP
 ; RV32IFD-NEXT:    fcvt.w.d a0, fs11
 ; RV32IFD-NEXT:    #NO_APP
-; RV32IFD-NEXT:    fld fs11, 8(sp)
+; RV32IFD-NEXT:    fld fs11, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: explicit_register_fs11:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    fsd fs11, 8(sp)
+; RV64IFD-NEXT:    fsd fs11, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    fmv.d fs11, fa0
 ; RV64IFD-NEXT:    #APP
 ; RV64IFD-NEXT:    fcvt.w.d a0, fs11
 ; RV64IFD-NEXT:    #NO_APP
-; RV64IFD-NEXT:    fld fs11, 8(sp)
+; RV64IFD-NEXT:    fld fs11, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.d $0, $1", "=r,{fs11}"(double %a)

diff --git a/llvm/test/CodeGen/RISCV/inline-asm-f-abi-names.ll b/llvm/test/CodeGen/RISCV/inline-asm-f-abi-names.ll
index 8eddc3ebf2d8..06a624fe953a 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-f-abi-names.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-f-abi-names.ll
@@ -341,24 +341,24 @@ define i32 @explicit_register_f8(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f8:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs0, 12(sp)
+; RV32IF-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs0, 12(sp)
+; RV32IF-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f8:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs0, 12(sp)
+; RV64IF-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs0, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs0
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs0, 12(sp)
+; RV64IF-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f8}"(float %a)
@@ -370,24 +370,24 @@ define i32 @explicit_register_fs0(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs0:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs0, 12(sp)
+; RV32IF-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs0, 12(sp)
+; RV32IF-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs0:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs0, 12(sp)
+; RV64IF-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs0, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs0
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs0, 12(sp)
+; RV64IF-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs0}"(float %a)
@@ -399,24 +399,24 @@ define i32 @explicit_register_f9(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f9:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs1, 12(sp)
+; RV32IF-NEXT:    fsw fs1, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs1, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs1
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs1, 12(sp)
+; RV32IF-NEXT:    flw fs1, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f9:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs1, 12(sp)
+; RV64IF-NEXT:    fsw fs1, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs1, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs1
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs1, 12(sp)
+; RV64IF-NEXT:    flw fs1, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f9}"(float %a)
@@ -428,24 +428,24 @@ define i32 @explicit_register_fs1(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs1:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs1, 12(sp)
+; RV32IF-NEXT:    fsw fs1, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs1, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs1
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs1, 12(sp)
+; RV32IF-NEXT:    flw fs1, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs1:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs1, 12(sp)
+; RV64IF-NEXT:    fsw fs1, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs1, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs1
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs1, 12(sp)
+; RV64IF-NEXT:    flw fs1, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs1}"(float %a)
@@ -773,24 +773,24 @@ define i32 @explicit_register_f18(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f18:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs2, 12(sp)
+; RV32IF-NEXT:    fsw fs2, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs2, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs2
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs2, 12(sp)
+; RV32IF-NEXT:    flw fs2, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f18:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs2, 12(sp)
+; RV64IF-NEXT:    fsw fs2, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs2, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs2
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs2, 12(sp)
+; RV64IF-NEXT:    flw fs2, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f18}"(float %a)
@@ -802,24 +802,24 @@ define i32 @explicit_register_fs2(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs2:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs2, 12(sp)
+; RV32IF-NEXT:    fsw fs2, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs2, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs2
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs2, 12(sp)
+; RV32IF-NEXT:    flw fs2, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs2:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs2, 12(sp)
+; RV64IF-NEXT:    fsw fs2, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs2, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs2
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs2, 12(sp)
+; RV64IF-NEXT:    flw fs2, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs2}"(float %a)
@@ -831,24 +831,24 @@ define i32 @explicit_register_f19(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f19:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs3, 12(sp)
+; RV32IF-NEXT:    fsw fs3, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs3, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs3
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs3, 12(sp)
+; RV32IF-NEXT:    flw fs3, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f19:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs3, 12(sp)
+; RV64IF-NEXT:    fsw fs3, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs3, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs3
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs3, 12(sp)
+; RV64IF-NEXT:    flw fs3, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f19}"(float %a)
@@ -860,24 +860,24 @@ define i32 @explicit_register_fs3(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs3:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs3, 12(sp)
+; RV32IF-NEXT:    fsw fs3, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs3, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs3
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs3, 12(sp)
+; RV32IF-NEXT:    flw fs3, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs3:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs3, 12(sp)
+; RV64IF-NEXT:    fsw fs3, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs3, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs3
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs3, 12(sp)
+; RV64IF-NEXT:    flw fs3, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs3}"(float %a)
@@ -889,24 +889,24 @@ define i32 @explicit_register_f20(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f20:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs4, 12(sp)
+; RV32IF-NEXT:    fsw fs4, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs4, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs4
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs4, 12(sp)
+; RV32IF-NEXT:    flw fs4, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f20:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs4, 12(sp)
+; RV64IF-NEXT:    fsw fs4, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs4, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs4
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs4, 12(sp)
+; RV64IF-NEXT:    flw fs4, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f20}"(float %a)
@@ -918,24 +918,24 @@ define i32 @explicit_register_fs4(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs4:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs4, 12(sp)
+; RV32IF-NEXT:    fsw fs4, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs4, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs4
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs4, 12(sp)
+; RV32IF-NEXT:    flw fs4, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs4:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs4, 12(sp)
+; RV64IF-NEXT:    fsw fs4, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs4, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs4
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs4, 12(sp)
+; RV64IF-NEXT:    flw fs4, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs4}"(float %a)
@@ -947,24 +947,24 @@ define i32 @explicit_register_f21(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f21:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs5, 12(sp)
+; RV32IF-NEXT:    fsw fs5, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs5, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs5
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs5, 12(sp)
+; RV32IF-NEXT:    flw fs5, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f21:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs5, 12(sp)
+; RV64IF-NEXT:    fsw fs5, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs5, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs5
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs5, 12(sp)
+; RV64IF-NEXT:    flw fs5, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f21}"(float %a)
@@ -976,24 +976,24 @@ define i32 @explicit_register_fs5(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs5:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs5, 12(sp)
+; RV32IF-NEXT:    fsw fs5, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs5, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs5
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs5, 12(sp)
+; RV32IF-NEXT:    flw fs5, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs5:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs5, 12(sp)
+; RV64IF-NEXT:    fsw fs5, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs5, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs5
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs5, 12(sp)
+; RV64IF-NEXT:    flw fs5, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs5}"(float %a)
@@ -1005,24 +1005,24 @@ define i32 @explicit_register_f22(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f22:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs6, 12(sp)
+; RV32IF-NEXT:    fsw fs6, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs6, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs6
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs6, 12(sp)
+; RV32IF-NEXT:    flw fs6, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f22:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs6, 12(sp)
+; RV64IF-NEXT:    fsw fs6, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs6, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs6
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs6, 12(sp)
+; RV64IF-NEXT:    flw fs6, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f22}"(float %a)
@@ -1034,24 +1034,24 @@ define i32 @explicit_register_fs6(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs6:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs6, 12(sp)
+; RV32IF-NEXT:    fsw fs6, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs6, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs6
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs6, 12(sp)
+; RV32IF-NEXT:    flw fs6, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs6:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs6, 12(sp)
+; RV64IF-NEXT:    fsw fs6, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs6, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs6
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs6, 12(sp)
+; RV64IF-NEXT:    flw fs6, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs6}"(float %a)
@@ -1063,24 +1063,24 @@ define i32 @explicit_register_f23(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f23:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs7, 12(sp)
+; RV32IF-NEXT:    fsw fs7, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs7, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs7
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs7, 12(sp)
+; RV32IF-NEXT:    flw fs7, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f23:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs7, 12(sp)
+; RV64IF-NEXT:    fsw fs7, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs7, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs7
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs7, 12(sp)
+; RV64IF-NEXT:    flw fs7, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f23}"(float %a)
@@ -1092,24 +1092,24 @@ define i32 @explicit_register_fs7(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs7:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs7, 12(sp)
+; RV32IF-NEXT:    fsw fs7, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs7, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs7
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs7, 12(sp)
+; RV32IF-NEXT:    flw fs7, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs7:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs7, 12(sp)
+; RV64IF-NEXT:    fsw fs7, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs7, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs7
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs7, 12(sp)
+; RV64IF-NEXT:    flw fs7, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs7}"(float %a)
@@ -1121,24 +1121,24 @@ define i32 @explicit_register_f24(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f24:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs8, 12(sp)
+; RV32IF-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs8, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs8
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs8, 12(sp)
+; RV32IF-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f24:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs8, 12(sp)
+; RV64IF-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs8, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs8
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs8, 12(sp)
+; RV64IF-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f24}"(float %a)
@@ -1150,24 +1150,24 @@ define i32 @explicit_register_fs8(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs8:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs8, 12(sp)
+; RV32IF-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs8, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs8
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs8, 12(sp)
+; RV32IF-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs8:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs8, 12(sp)
+; RV64IF-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs8, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs8
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs8, 12(sp)
+; RV64IF-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs8}"(float %a)
@@ -1179,24 +1179,24 @@ define i32 @explicit_register_f25(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f25:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs9, 12(sp)
+; RV32IF-NEXT:    fsw fs9, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs9, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs9
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs9, 12(sp)
+; RV32IF-NEXT:    flw fs9, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f25:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs9, 12(sp)
+; RV64IF-NEXT:    fsw fs9, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs9, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs9
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs9, 12(sp)
+; RV64IF-NEXT:    flw fs9, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f25}"(float %a)
@@ -1208,24 +1208,24 @@ define i32 @explicit_register_fs9(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs9:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs9, 12(sp)
+; RV32IF-NEXT:    fsw fs9, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs9, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs9
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs9, 12(sp)
+; RV32IF-NEXT:    flw fs9, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs9:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs9, 12(sp)
+; RV64IF-NEXT:    fsw fs9, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs9, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs9
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs9, 12(sp)
+; RV64IF-NEXT:    flw fs9, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs9}"(float %a)
@@ -1237,24 +1237,24 @@ define i32 @explicit_register_f26(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f26:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs10, 12(sp)
+; RV32IF-NEXT:    fsw fs10, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs10, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs10
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs10, 12(sp)
+; RV32IF-NEXT:    flw fs10, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f26:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs10, 12(sp)
+; RV64IF-NEXT:    fsw fs10, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs10, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs10
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs10, 12(sp)
+; RV64IF-NEXT:    flw fs10, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f26}"(float %a)
@@ -1266,24 +1266,24 @@ define i32 @explicit_register_fs10(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs10:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs10, 12(sp)
+; RV32IF-NEXT:    fsw fs10, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs10, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs10
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs10, 12(sp)
+; RV32IF-NEXT:    flw fs10, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs10:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs10, 12(sp)
+; RV64IF-NEXT:    fsw fs10, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs10, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs10
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs10, 12(sp)
+; RV64IF-NEXT:    flw fs10, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs10}"(float %a)
@@ -1295,24 +1295,24 @@ define i32 @explicit_register_f27(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_f27:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs11, 12(sp)
+; RV32IF-NEXT:    fsw fs11, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs11, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs11
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs11, 12(sp)
+; RV32IF-NEXT:    flw fs11, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_f27:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs11, 12(sp)
+; RV64IF-NEXT:    fsw fs11, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs11, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs11
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs11, 12(sp)
+; RV64IF-NEXT:    flw fs11, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{f27}"(float %a)
@@ -1324,24 +1324,24 @@ define i32 @explicit_register_fs11(float %a) nounwind {
 ; RV32IF-LABEL: explicit_register_fs11:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    fsw fs11, 12(sp)
+; RV32IF-NEXT:    fsw fs11, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs11, fa0
 ; RV32IF-NEXT:    #APP
 ; RV32IF-NEXT:    fcvt.w.s a0, fs11
 ; RV32IF-NEXT:    #NO_APP
-; RV32IF-NEXT:    flw fs11, 12(sp)
+; RV32IF-NEXT:    flw fs11, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: explicit_register_fs11:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    fsw fs11, 12(sp)
+; RV64IF-NEXT:    fsw fs11, 12(sp) # 4-byte Folded Spill
 ; RV64IF-NEXT:    fmv.s fs11, fa0
 ; RV64IF-NEXT:    #APP
 ; RV64IF-NEXT:    fcvt.w.s a0, fs11
 ; RV64IF-NEXT:    #NO_APP
-; RV64IF-NEXT:    flw fs11, 12(sp)
+; RV64IF-NEXT:    flw fs11, 12(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = tail call i32 asm "fcvt.w.s $0, $1", "=r,{fs11}"(float %a)

diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
index 457b8667e10d..6dc86c54ec7c 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
@@ -14,50 +14,50 @@ define dso_local void @handler() nounwind {
 ; CHECK-RV32-LABEL: handler:
 ; CHECK-RV32:       # %bb.0: # %entry
 ; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    sw ra, 12(sp)
-; CHECK-RV32-NEXT:    sw s0, 8(sp)
+; CHECK-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    lui a0, 2
 ; CHECK-RV32-NEXT:    addi a0, a0, 4
-; CHECK-RV32-NEXT:    call read
+; CHECK-RV32-NEXT:    call read@plt
 ; CHECK-RV32-NEXT:    mv s0, a0
-; CHECK-RV32-NEXT:    call callee
+; CHECK-RV32-NEXT:    call callee@plt
 ; CHECK-RV32-NEXT:    mv a0, s0
-; CHECK-RV32-NEXT:    lw s0, 8(sp)
-; CHECK-RV32-NEXT:    lw ra, 12(sp)
+; CHECK-RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
-; CHECK-RV32-NEXT:    tail write
+; CHECK-RV32-NEXT:    tail write@plt
 ;
 ; CHECK-RV32-F-LABEL: handler:
 ; CHECK-RV32-F:       # %bb.0: # %entry
 ; CHECK-RV32-F-NEXT:    addi sp, sp, -16
-; CHECK-RV32-F-NEXT:    sw ra, 12(sp)
-; CHECK-RV32-F-NEXT:    sw s0, 8(sp)
+; CHECK-RV32-F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-F-NEXT:    lui a0, 2
 ; CHECK-RV32-F-NEXT:    addi a0, a0, 4
-; CHECK-RV32-F-NEXT:    call read
+; CHECK-RV32-F-NEXT:    call read@plt
 ; CHECK-RV32-F-NEXT:    mv s0, a0
-; CHECK-RV32-F-NEXT:    call callee
+; CHECK-RV32-F-NEXT:    call callee@plt
 ; CHECK-RV32-F-NEXT:    mv a0, s0
-; CHECK-RV32-F-NEXT:    lw s0, 8(sp)
-; CHECK-RV32-F-NEXT:    lw ra, 12(sp)
+; CHECK-RV32-F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    addi sp, sp, 16
-; CHECK-RV32-F-NEXT:    tail write
+; CHECK-RV32-F-NEXT:    tail write@plt
 ;
 ; CHECK-RV32-FD-LABEL: handler:
 ; CHECK-RV32-FD:       # %bb.0: # %entry
 ; CHECK-RV32-FD-NEXT:    addi sp, sp, -16
-; CHECK-RV32-FD-NEXT:    sw ra, 12(sp)
-; CHECK-RV32-FD-NEXT:    sw s0, 8(sp)
+; CHECK-RV32-FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-FD-NEXT:    lui a0, 2
 ; CHECK-RV32-FD-NEXT:    addi a0, a0, 4
-; CHECK-RV32-FD-NEXT:    call read
+; CHECK-RV32-FD-NEXT:    call read@plt
 ; CHECK-RV32-FD-NEXT:    mv s0, a0
-; CHECK-RV32-FD-NEXT:    call callee
+; CHECK-RV32-FD-NEXT:    call callee@plt
 ; CHECK-RV32-FD-NEXT:    mv a0, s0
-; CHECK-RV32-FD-NEXT:    lw s0, 8(sp)
-; CHECK-RV32-FD-NEXT:    lw ra, 12(sp)
+; CHECK-RV32-FD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    addi sp, sp, 16
-; CHECK-RV32-FD-NEXT:    tail write
+; CHECK-RV32-FD-NEXT:    tail write@plt
 entry:
   %call = tail call i32 @read(i32 8196)
   tail call void bitcast (void (...)* @callee to void ()*)()

diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
index 025f92c3de96..32ab8a36832f 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
@@ -26,8 +26,8 @@ define void @foo_i32() nounwind #0 {
 ; CHECK-RV32-LABEL: foo_i32:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    sw a0, 12(sp)
-; CHECK-RV32-NEXT:    sw a1, 8(sp)
+; CHECK-RV32-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    lui a0, %hi(a)
 ; CHECK-RV32-NEXT:    lw a0, %lo(a)(a0)
 ; CHECK-RV32-NEXT:    lui a1, %hi(b)
@@ -35,16 +35,16 @@ define void @foo_i32() nounwind #0 {
 ; CHECK-RV32-NEXT:    add a0, a1, a0
 ; CHECK-RV32-NEXT:    lui a1, %hi(c)
 ; CHECK-RV32-NEXT:    sw a0, %lo(c)(a1)
-; CHECK-RV32-NEXT:    lw a1, 8(sp)
-; CHECK-RV32-NEXT:    lw a0, 12(sp)
+; CHECK-RV32-NEXT:    lw a1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32IF-LABEL: foo_i32:
 ; CHECK-RV32IF:       # %bb.0:
 ; CHECK-RV32IF-NEXT:    addi sp, sp, -16
-; CHECK-RV32IF-NEXT:    sw a0, 12(sp)
-; CHECK-RV32IF-NEXT:    sw a1, 8(sp)
+; CHECK-RV32IF-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a1, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(a)
 ; CHECK-RV32IF-NEXT:    lw a0, %lo(a)(a0)
 ; CHECK-RV32IF-NEXT:    lui a1, %hi(b)
@@ -52,16 +52,16 @@ define void @foo_i32() nounwind #0 {
 ; CHECK-RV32IF-NEXT:    add a0, a1, a0
 ; CHECK-RV32IF-NEXT:    lui a1, %hi(c)
 ; CHECK-RV32IF-NEXT:    sw a0, %lo(c)(a1)
-; CHECK-RV32IF-NEXT:    lw a1, 8(sp)
-; CHECK-RV32IF-NEXT:    lw a0, 12(sp)
+; CHECK-RV32IF-NEXT:    lw a1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 16
 ; CHECK-RV32IF-NEXT:    mret
 ;
 ; CHECK-RV32IFD-LABEL: foo_i32:
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -16
-; CHECK-RV32IFD-NEXT:    sw a0, 12(sp)
-; CHECK-RV32IFD-NEXT:    sw a1, 8(sp)
+; CHECK-RV32IFD-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw a1, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(a)
 ; CHECK-RV32IFD-NEXT:    lw a0, %lo(a)(a0)
 ; CHECK-RV32IFD-NEXT:    lui a1, %hi(b)
@@ -69,8 +69,8 @@ define void @foo_i32() nounwind #0 {
 ; CHECK-RV32IFD-NEXT:    add a0, a1, a0
 ; CHECK-RV32IFD-NEXT:    lui a1, %hi(c)
 ; CHECK-RV32IFD-NEXT:    sw a0, %lo(c)(a1)
-; CHECK-RV32IFD-NEXT:    lw a1, 8(sp)
-; CHECK-RV32IFD-NEXT:    lw a0, 12(sp)
+; CHECK-RV32IFD-NEXT:    lw a1, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 16
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load i32, i32* @a
@@ -88,10 +88,10 @@ define void @foo_fp_i32() nounwind #1 {
 ; CHECK-RV32-LABEL: foo_fp_i32:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    sw ra, 12(sp)
-; CHECK-RV32-NEXT:    sw s0, 8(sp)
-; CHECK-RV32-NEXT:    sw a0, 4(sp)
-; CHECK-RV32-NEXT:    sw a1, 0(sp)
+; CHECK-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 0(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    addi s0, sp, 16
 ; CHECK-RV32-NEXT:    lui a0, %hi(a)
 ; CHECK-RV32-NEXT:    lw a0, %lo(a)(a0)
@@ -100,20 +100,20 @@ define void @foo_fp_i32() nounwind #1 {
 ; CHECK-RV32-NEXT:    add a0, a1, a0
 ; CHECK-RV32-NEXT:    lui a1, %hi(c)
 ; CHECK-RV32-NEXT:    sw a0, %lo(c)(a1)
-; CHECK-RV32-NEXT:    lw a1, 0(sp)
-; CHECK-RV32-NEXT:    lw a0, 4(sp)
-; CHECK-RV32-NEXT:    lw s0, 8(sp)
-; CHECK-RV32-NEXT:    lw ra, 12(sp)
+; CHECK-RV32-NEXT:    lw a1, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32IF-LABEL: foo_fp_i32:
 ; CHECK-RV32IF:       # %bb.0:
 ; CHECK-RV32IF-NEXT:    addi sp, sp, -16
-; CHECK-RV32IF-NEXT:    sw ra, 12(sp)
-; CHECK-RV32IF-NEXT:    sw s0, 8(sp)
-; CHECK-RV32IF-NEXT:    sw a0, 4(sp)
-; CHECK-RV32IF-NEXT:    sw a1, 0(sp)
+; CHECK-RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a1, 0(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    addi s0, sp, 16
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(a)
 ; CHECK-RV32IF-NEXT:    lw a0, %lo(a)(a0)
@@ -122,20 +122,20 @@ define void @foo_fp_i32() nounwind #1 {
 ; CHECK-RV32IF-NEXT:    add a0, a1, a0
 ; CHECK-RV32IF-NEXT:    lui a1, %hi(c)
 ; CHECK-RV32IF-NEXT:    sw a0, %lo(c)(a1)
-; CHECK-RV32IF-NEXT:    lw a1, 0(sp)
-; CHECK-RV32IF-NEXT:    lw a0, 4(sp)
-; CHECK-RV32IF-NEXT:    lw s0, 8(sp)
-; CHECK-RV32IF-NEXT:    lw ra, 12(sp)
+; CHECK-RV32IF-NEXT:    lw a1, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 16
 ; CHECK-RV32IF-NEXT:    mret
 ;
 ; CHECK-RV32IFD-LABEL: foo_fp_i32:
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -16
-; CHECK-RV32IFD-NEXT:    sw ra, 12(sp)
-; CHECK-RV32IFD-NEXT:    sw s0, 8(sp)
-; CHECK-RV32IFD-NEXT:    sw a0, 4(sp)
-; CHECK-RV32IFD-NEXT:    sw a1, 0(sp)
+; CHECK-RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw a0, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw a1, 0(sp) # 4-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    addi s0, sp, 16
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(a)
 ; CHECK-RV32IFD-NEXT:    lw a0, %lo(a)(a0)
@@ -144,10 +144,10 @@ define void @foo_fp_i32() nounwind #1 {
 ; CHECK-RV32IFD-NEXT:    add a0, a1, a0
 ; CHECK-RV32IFD-NEXT:    lui a1, %hi(c)
 ; CHECK-RV32IFD-NEXT:    sw a0, %lo(c)(a1)
-; CHECK-RV32IFD-NEXT:    lw a1, 0(sp)
-; CHECK-RV32IFD-NEXT:    lw a0, 4(sp)
-; CHECK-RV32IFD-NEXT:    lw s0, 8(sp)
-; CHECK-RV32IFD-NEXT:    lw ra, 12(sp)
+; CHECK-RV32IFD-NEXT:    lw a1, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 16
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load i32, i32* @a
@@ -165,54 +165,54 @@ define void @foo_float() nounwind #0 {
 ; CHECK-RV32-LABEL: foo_float:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -64
-; CHECK-RV32-NEXT:    sw ra, 60(sp)
-; CHECK-RV32-NEXT:    sw t0, 56(sp)
-; CHECK-RV32-NEXT:    sw t1, 52(sp)
-; CHECK-RV32-NEXT:    sw t2, 48(sp)
-; CHECK-RV32-NEXT:    sw a0, 44(sp)
-; CHECK-RV32-NEXT:    sw a1, 40(sp)
-; CHECK-RV32-NEXT:    sw a2, 36(sp)
-; CHECK-RV32-NEXT:    sw a3, 32(sp)
-; CHECK-RV32-NEXT:    sw a4, 28(sp)
-; CHECK-RV32-NEXT:    sw a5, 24(sp)
-; CHECK-RV32-NEXT:    sw a6, 20(sp)
-; CHECK-RV32-NEXT:    sw a7, 16(sp)
-; CHECK-RV32-NEXT:    sw t3, 12(sp)
-; CHECK-RV32-NEXT:    sw t4, 8(sp)
-; CHECK-RV32-NEXT:    sw t5, 4(sp)
-; CHECK-RV32-NEXT:    sw t6, 0(sp)
+; CHECK-RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a2, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a3, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a4, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a6, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a7, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t3, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    lui a0, %hi(e)
 ; CHECK-RV32-NEXT:    lw a0, %lo(e)(a0)
 ; CHECK-RV32-NEXT:    lui a1, %hi(f)
 ; CHECK-RV32-NEXT:    lw a1, %lo(f)(a1)
-; CHECK-RV32-NEXT:    call __addsf3
+; CHECK-RV32-NEXT:    call __addsf3@plt
 ; CHECK-RV32-NEXT:    lui a1, %hi(d)
 ; CHECK-RV32-NEXT:    sw a0, %lo(d)(a1)
-; CHECK-RV32-NEXT:    lw t6, 0(sp)
-; CHECK-RV32-NEXT:    lw t5, 4(sp)
-; CHECK-RV32-NEXT:    lw t4, 8(sp)
-; CHECK-RV32-NEXT:    lw t3, 12(sp)
-; CHECK-RV32-NEXT:    lw a7, 16(sp)
-; CHECK-RV32-NEXT:    lw a6, 20(sp)
-; CHECK-RV32-NEXT:    lw a5, 24(sp)
-; CHECK-RV32-NEXT:    lw a4, 28(sp)
-; CHECK-RV32-NEXT:    lw a3, 32(sp)
-; CHECK-RV32-NEXT:    lw a2, 36(sp)
-; CHECK-RV32-NEXT:    lw a1, 40(sp)
-; CHECK-RV32-NEXT:    lw a0, 44(sp)
-; CHECK-RV32-NEXT:    lw t2, 48(sp)
-; CHECK-RV32-NEXT:    lw t1, 52(sp)
-; CHECK-RV32-NEXT:    lw t0, 56(sp)
-; CHECK-RV32-NEXT:    lw ra, 60(sp)
+; CHECK-RV32-NEXT:    lw t6, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t5, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t4, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t3, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a7, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a6, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a1, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 64
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32IF-LABEL: foo_float:
 ; CHECK-RV32IF:       # %bb.0:
 ; CHECK-RV32IF-NEXT:    addi sp, sp, -16
-; CHECK-RV32IF-NEXT:    sw a0, 12(sp)
-; CHECK-RV32IF-NEXT:    fsw ft0, 8(sp)
-; CHECK-RV32IF-NEXT:    fsw ft1, 4(sp)
+; CHECK-RV32IF-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(e)
 ; CHECK-RV32IF-NEXT:    flw ft0, %lo(e)(a0)
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(f)
@@ -220,18 +220,18 @@ define void @foo_float() nounwind #0 {
 ; CHECK-RV32IF-NEXT:    fadd.s ft0, ft0, ft1
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(d)
 ; CHECK-RV32IF-NEXT:    fsw ft0, %lo(d)(a0)
-; CHECK-RV32IF-NEXT:    flw ft1, 4(sp)
-; CHECK-RV32IF-NEXT:    flw ft0, 8(sp)
-; CHECK-RV32IF-NEXT:    lw a0, 12(sp)
+; CHECK-RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 16
 ; CHECK-RV32IF-NEXT:    mret
 ;
 ; CHECK-RV32IFD-LABEL: foo_float:
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -32
-; CHECK-RV32IFD-NEXT:    sw a0, 28(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft0, 16(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft1, 8(sp)
+; CHECK-RV32IFD-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft1, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(e)
 ; CHECK-RV32IFD-NEXT:    flw ft0, %lo(e)(a0)
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(f)
@@ -239,9 +239,9 @@ define void @foo_float() nounwind #0 {
 ; CHECK-RV32IFD-NEXT:    fadd.s ft0, ft0, ft1
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(d)
 ; CHECK-RV32IFD-NEXT:    fsw ft0, %lo(d)(a0)
-; CHECK-RV32IFD-NEXT:    fld ft1, 8(sp)
-; CHECK-RV32IFD-NEXT:    fld ft0, 16(sp)
-; CHECK-RV32IFD-NEXT:    lw a0, 28(sp)
+; CHECK-RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load float, float* @e
@@ -258,59 +258,59 @@ define void @foo_fp_float() nounwind #1 {
 ; CHECK-RV32-LABEL: foo_fp_float:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -80
-; CHECK-RV32-NEXT:    sw ra, 76(sp)
-; CHECK-RV32-NEXT:    sw t0, 72(sp)
-; CHECK-RV32-NEXT:    sw t1, 68(sp)
-; CHECK-RV32-NEXT:    sw t2, 64(sp)
-; CHECK-RV32-NEXT:    sw s0, 60(sp)
-; CHECK-RV32-NEXT:    sw a0, 56(sp)
-; CHECK-RV32-NEXT:    sw a1, 52(sp)
-; CHECK-RV32-NEXT:    sw a2, 48(sp)
-; CHECK-RV32-NEXT:    sw a3, 44(sp)
-; CHECK-RV32-NEXT:    sw a4, 40(sp)
-; CHECK-RV32-NEXT:    sw a5, 36(sp)
-; CHECK-RV32-NEXT:    sw a6, 32(sp)
-; CHECK-RV32-NEXT:    sw a7, 28(sp)
-; CHECK-RV32-NEXT:    sw t3, 24(sp)
-; CHECK-RV32-NEXT:    sw t4, 20(sp)
-; CHECK-RV32-NEXT:    sw t5, 16(sp)
-; CHECK-RV32-NEXT:    sw t6, 12(sp)
+; CHECK-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t0, 72(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t1, 68(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t2, 64(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s0, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a3, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a4, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a5, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a6, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a7, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t3, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t4, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t6, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    addi s0, sp, 80
 ; CHECK-RV32-NEXT:    lui a0, %hi(e)
 ; CHECK-RV32-NEXT:    lw a0, %lo(e)(a0)
 ; CHECK-RV32-NEXT:    lui a1, %hi(f)
 ; CHECK-RV32-NEXT:    lw a1, %lo(f)(a1)
-; CHECK-RV32-NEXT:    call __addsf3
+; CHECK-RV32-NEXT:    call __addsf3@plt
 ; CHECK-RV32-NEXT:    lui a1, %hi(d)
 ; CHECK-RV32-NEXT:    sw a0, %lo(d)(a1)
-; CHECK-RV32-NEXT:    lw t6, 12(sp)
-; CHECK-RV32-NEXT:    lw t5, 16(sp)
-; CHECK-RV32-NEXT:    lw t4, 20(sp)
-; CHECK-RV32-NEXT:    lw t3, 24(sp)
-; CHECK-RV32-NEXT:    lw a7, 28(sp)
-; CHECK-RV32-NEXT:    lw a6, 32(sp)
-; CHECK-RV32-NEXT:    lw a5, 36(sp)
-; CHECK-RV32-NEXT:    lw a4, 40(sp)
-; CHECK-RV32-NEXT:    lw a3, 44(sp)
-; CHECK-RV32-NEXT:    lw a2, 48(sp)
-; CHECK-RV32-NEXT:    lw a1, 52(sp)
-; CHECK-RV32-NEXT:    lw a0, 56(sp)
-; CHECK-RV32-NEXT:    lw s0, 60(sp)
-; CHECK-RV32-NEXT:    lw t2, 64(sp)
-; CHECK-RV32-NEXT:    lw t1, 68(sp)
-; CHECK-RV32-NEXT:    lw t0, 72(sp)
-; CHECK-RV32-NEXT:    lw ra, 76(sp)
+; CHECK-RV32-NEXT:    lw t6, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t5, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t4, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t3, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a7, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a6, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a5, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a4, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a3, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw s0, 60(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t2, 64(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t1, 68(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t0, 72(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 80
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32IF-LABEL: foo_fp_float:
 ; CHECK-RV32IF:       # %bb.0:
 ; CHECK-RV32IF-NEXT:    addi sp, sp, -32
-; CHECK-RV32IF-NEXT:    sw ra, 28(sp)
-; CHECK-RV32IF-NEXT:    sw s0, 24(sp)
-; CHECK-RV32IF-NEXT:    sw a0, 20(sp)
-; CHECK-RV32IF-NEXT:    fsw ft0, 16(sp)
-; CHECK-RV32IF-NEXT:    fsw ft1, 12(sp)
+; CHECK-RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft1, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    addi s0, sp, 32
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(e)
 ; CHECK-RV32IF-NEXT:    flw ft0, %lo(e)(a0)
@@ -319,22 +319,22 @@ define void @foo_fp_float() nounwind #1 {
 ; CHECK-RV32IF-NEXT:    fadd.s ft0, ft0, ft1
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(d)
 ; CHECK-RV32IF-NEXT:    fsw ft0, %lo(d)(a0)
-; CHECK-RV32IF-NEXT:    flw ft1, 12(sp)
-; CHECK-RV32IF-NEXT:    flw ft0, 16(sp)
-; CHECK-RV32IF-NEXT:    lw a0, 20(sp)
-; CHECK-RV32IF-NEXT:    lw s0, 24(sp)
-; CHECK-RV32IF-NEXT:    lw ra, 28(sp)
+; CHECK-RV32IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IF-NEXT:    mret
 ;
 ; CHECK-RV32IFD-LABEL: foo_fp_float:
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -32
-; CHECK-RV32IFD-NEXT:    sw ra, 28(sp)
-; CHECK-RV32IFD-NEXT:    sw s0, 24(sp)
-; CHECK-RV32IFD-NEXT:    sw a0, 20(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft0, 8(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft1, 0(sp)
+; CHECK-RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft1, 0(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    addi s0, sp, 32
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(e)
 ; CHECK-RV32IFD-NEXT:    flw ft0, %lo(e)(a0)
@@ -343,11 +343,11 @@ define void @foo_fp_float() nounwind #1 {
 ; CHECK-RV32IFD-NEXT:    fadd.s ft0, ft0, ft1
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(d)
 ; CHECK-RV32IFD-NEXT:    fsw ft0, %lo(d)(a0)
-; CHECK-RV32IFD-NEXT:    fld ft1, 0(sp)
-; CHECK-RV32IFD-NEXT:    fld ft0, 8(sp)
-; CHECK-RV32IFD-NEXT:    lw a0, 20(sp)
-; CHECK-RV32IFD-NEXT:    lw s0, 24(sp)
-; CHECK-RV32IFD-NEXT:    lw ra, 28(sp)
+; CHECK-RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load float, float* @e
@@ -365,169 +365,169 @@ define void @foo_double() nounwind #0 {
 ; CHECK-RV32-LABEL: foo_double:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -64
-; CHECK-RV32-NEXT:    sw ra, 60(sp)
-; CHECK-RV32-NEXT:    sw t0, 56(sp)
-; CHECK-RV32-NEXT:    sw t1, 52(sp)
-; CHECK-RV32-NEXT:    sw t2, 48(sp)
-; CHECK-RV32-NEXT:    sw a0, 44(sp)
-; CHECK-RV32-NEXT:    sw a1, 40(sp)
-; CHECK-RV32-NEXT:    sw a2, 36(sp)
-; CHECK-RV32-NEXT:    sw a3, 32(sp)
-; CHECK-RV32-NEXT:    sw a4, 28(sp)
-; CHECK-RV32-NEXT:    sw a5, 24(sp)
-; CHECK-RV32-NEXT:    sw a6, 20(sp)
-; CHECK-RV32-NEXT:    sw a7, 16(sp)
-; CHECK-RV32-NEXT:    sw t3, 12(sp)
-; CHECK-RV32-NEXT:    sw t4, 8(sp)
-; CHECK-RV32-NEXT:    sw t5, 4(sp)
-; CHECK-RV32-NEXT:    sw t6, 0(sp)
+; CHECK-RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a2, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a3, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a4, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a6, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a7, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t3, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    lui a1, %hi(h)
 ; CHECK-RV32-NEXT:    lw a0, %lo(h)(a1)
 ; CHECK-RV32-NEXT:    lw a1, %lo(h+4)(a1)
 ; CHECK-RV32-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32-NEXT:    call __adddf3
+; CHECK-RV32-NEXT:    call __adddf3@plt
 ; CHECK-RV32-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32-NEXT:    sw a0, %lo(g)(a2)
-; CHECK-RV32-NEXT:    lw t6, 0(sp)
-; CHECK-RV32-NEXT:    lw t5, 4(sp)
-; CHECK-RV32-NEXT:    lw t4, 8(sp)
-; CHECK-RV32-NEXT:    lw t3, 12(sp)
-; CHECK-RV32-NEXT:    lw a7, 16(sp)
-; CHECK-RV32-NEXT:    lw a6, 20(sp)
-; CHECK-RV32-NEXT:    lw a5, 24(sp)
-; CHECK-RV32-NEXT:    lw a4, 28(sp)
-; CHECK-RV32-NEXT:    lw a3, 32(sp)
-; CHECK-RV32-NEXT:    lw a2, 36(sp)
-; CHECK-RV32-NEXT:    lw a1, 40(sp)
-; CHECK-RV32-NEXT:    lw a0, 44(sp)
-; CHECK-RV32-NEXT:    lw t2, 48(sp)
-; CHECK-RV32-NEXT:    lw t1, 52(sp)
-; CHECK-RV32-NEXT:    lw t0, 56(sp)
-; CHECK-RV32-NEXT:    lw ra, 60(sp)
+; CHECK-RV32-NEXT:    lw t6, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t5, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t4, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t3, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a7, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a6, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a1, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 64
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32IF-LABEL: foo_double:
 ; CHECK-RV32IF:       # %bb.0:
 ; CHECK-RV32IF-NEXT:    addi sp, sp, -192
-; CHECK-RV32IF-NEXT:    sw ra, 188(sp)
-; CHECK-RV32IF-NEXT:    sw t0, 184(sp)
-; CHECK-RV32IF-NEXT:    sw t1, 180(sp)
-; CHECK-RV32IF-NEXT:    sw t2, 176(sp)
-; CHECK-RV32IF-NEXT:    sw a0, 172(sp)
-; CHECK-RV32IF-NEXT:    sw a1, 168(sp)
-; CHECK-RV32IF-NEXT:    sw a2, 164(sp)
-; CHECK-RV32IF-NEXT:    sw a3, 160(sp)
-; CHECK-RV32IF-NEXT:    sw a4, 156(sp)
-; CHECK-RV32IF-NEXT:    sw a5, 152(sp)
-; CHECK-RV32IF-NEXT:    sw a6, 148(sp)
-; CHECK-RV32IF-NEXT:    sw a7, 144(sp)
-; CHECK-RV32IF-NEXT:    sw t3, 140(sp)
-; CHECK-RV32IF-NEXT:    sw t4, 136(sp)
-; CHECK-RV32IF-NEXT:    sw t5, 132(sp)
-; CHECK-RV32IF-NEXT:    sw t6, 128(sp)
-; CHECK-RV32IF-NEXT:    fsw ft0, 124(sp)
-; CHECK-RV32IF-NEXT:    fsw ft1, 120(sp)
-; CHECK-RV32IF-NEXT:    fsw ft2, 116(sp)
-; CHECK-RV32IF-NEXT:    fsw ft3, 112(sp)
-; CHECK-RV32IF-NEXT:    fsw ft4, 108(sp)
-; CHECK-RV32IF-NEXT:    fsw ft5, 104(sp)
-; CHECK-RV32IF-NEXT:    fsw ft6, 100(sp)
-; CHECK-RV32IF-NEXT:    fsw ft7, 96(sp)
-; CHECK-RV32IF-NEXT:    fsw fa0, 92(sp)
-; CHECK-RV32IF-NEXT:    fsw fa1, 88(sp)
-; CHECK-RV32IF-NEXT:    fsw fa2, 84(sp)
-; CHECK-RV32IF-NEXT:    fsw fa3, 80(sp)
-; CHECK-RV32IF-NEXT:    fsw fa4, 76(sp)
-; CHECK-RV32IF-NEXT:    fsw fa5, 72(sp)
-; CHECK-RV32IF-NEXT:    fsw fa6, 68(sp)
-; CHECK-RV32IF-NEXT:    fsw fa7, 64(sp)
-; CHECK-RV32IF-NEXT:    fsw ft8, 60(sp)
-; CHECK-RV32IF-NEXT:    fsw ft9, 56(sp)
-; CHECK-RV32IF-NEXT:    fsw ft10, 52(sp)
-; CHECK-RV32IF-NEXT:    fsw ft11, 48(sp)
-; CHECK-RV32IF-NEXT:    fsw fs0, 44(sp)
-; CHECK-RV32IF-NEXT:    fsw fs1, 40(sp)
-; CHECK-RV32IF-NEXT:    fsw fs2, 36(sp)
-; CHECK-RV32IF-NEXT:    fsw fs3, 32(sp)
-; CHECK-RV32IF-NEXT:    fsw fs4, 28(sp)
-; CHECK-RV32IF-NEXT:    fsw fs5, 24(sp)
-; CHECK-RV32IF-NEXT:    fsw fs6, 20(sp)
-; CHECK-RV32IF-NEXT:    fsw fs7, 16(sp)
-; CHECK-RV32IF-NEXT:    fsw fs8, 12(sp)
-; CHECK-RV32IF-NEXT:    fsw fs9, 8(sp)
-; CHECK-RV32IF-NEXT:    fsw fs10, 4(sp)
-; CHECK-RV32IF-NEXT:    fsw fs11, 0(sp)
+; CHECK-RV32IF-NEXT:    sw ra, 188(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t0, 184(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t1, 180(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t2, 176(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a0, 172(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a1, 168(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a2, 164(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a3, 160(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a4, 156(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a5, 152(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a6, 148(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a7, 144(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t3, 140(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t4, 136(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t5, 132(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t6, 128(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft0, 124(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft1, 120(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft2, 116(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft3, 112(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft4, 108(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft5, 104(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft6, 100(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft7, 96(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa0, 92(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa1, 88(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa2, 84(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa3, 80(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa4, 76(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa6, 68(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa7, 64(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft8, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft9, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft10, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft11, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs0, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs1, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs2, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs3, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs4, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs5, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs6, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs7, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs9, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs10, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs11, 0(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    lui a1, %hi(h)
 ; CHECK-RV32IF-NEXT:    lw a0, %lo(h)(a1)
 ; CHECK-RV32IF-NEXT:    lw a1, %lo(h+4)(a1)
 ; CHECK-RV32IF-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32IF-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32IF-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32IF-NEXT:    call __adddf3
+; CHECK-RV32IF-NEXT:    call __adddf3@plt
 ; CHECK-RV32IF-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32IF-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32IF-NEXT:    sw a0, %lo(g)(a2)
-; CHECK-RV32IF-NEXT:    flw fs11, 0(sp)
-; CHECK-RV32IF-NEXT:    flw fs10, 4(sp)
-; CHECK-RV32IF-NEXT:    flw fs9, 8(sp)
-; CHECK-RV32IF-NEXT:    flw fs8, 12(sp)
-; CHECK-RV32IF-NEXT:    flw fs7, 16(sp)
-; CHECK-RV32IF-NEXT:    flw fs6, 20(sp)
-; CHECK-RV32IF-NEXT:    flw fs5, 24(sp)
-; CHECK-RV32IF-NEXT:    flw fs4, 28(sp)
-; CHECK-RV32IF-NEXT:    flw fs3, 32(sp)
-; CHECK-RV32IF-NEXT:    flw fs2, 36(sp)
-; CHECK-RV32IF-NEXT:    flw fs1, 40(sp)
-; CHECK-RV32IF-NEXT:    flw fs0, 44(sp)
-; CHECK-RV32IF-NEXT:    flw ft11, 48(sp)
-; CHECK-RV32IF-NEXT:    flw ft10, 52(sp)
-; CHECK-RV32IF-NEXT:    flw ft9, 56(sp)
-; CHECK-RV32IF-NEXT:    flw ft8, 60(sp)
-; CHECK-RV32IF-NEXT:    flw fa7, 64(sp)
-; CHECK-RV32IF-NEXT:    flw fa6, 68(sp)
-; CHECK-RV32IF-NEXT:    flw fa5, 72(sp)
-; CHECK-RV32IF-NEXT:    flw fa4, 76(sp)
-; CHECK-RV32IF-NEXT:    flw fa3, 80(sp)
-; CHECK-RV32IF-NEXT:    flw fa2, 84(sp)
-; CHECK-RV32IF-NEXT:    flw fa1, 88(sp)
-; CHECK-RV32IF-NEXT:    flw fa0, 92(sp)
-; CHECK-RV32IF-NEXT:    flw ft7, 96(sp)
-; CHECK-RV32IF-NEXT:    flw ft6, 100(sp)
-; CHECK-RV32IF-NEXT:    flw ft5, 104(sp)
-; CHECK-RV32IF-NEXT:    flw ft4, 108(sp)
-; CHECK-RV32IF-NEXT:    flw ft3, 112(sp)
-; CHECK-RV32IF-NEXT:    flw ft2, 116(sp)
-; CHECK-RV32IF-NEXT:    flw ft1, 120(sp)
-; CHECK-RV32IF-NEXT:    flw ft0, 124(sp)
-; CHECK-RV32IF-NEXT:    lw t6, 128(sp)
-; CHECK-RV32IF-NEXT:    lw t5, 132(sp)
-; CHECK-RV32IF-NEXT:    lw t4, 136(sp)
-; CHECK-RV32IF-NEXT:    lw t3, 140(sp)
-; CHECK-RV32IF-NEXT:    lw a7, 144(sp)
-; CHECK-RV32IF-NEXT:    lw a6, 148(sp)
-; CHECK-RV32IF-NEXT:    lw a5, 152(sp)
-; CHECK-RV32IF-NEXT:    lw a4, 156(sp)
-; CHECK-RV32IF-NEXT:    lw a3, 160(sp)
-; CHECK-RV32IF-NEXT:    lw a2, 164(sp)
-; CHECK-RV32IF-NEXT:    lw a1, 168(sp)
-; CHECK-RV32IF-NEXT:    lw a0, 172(sp)
-; CHECK-RV32IF-NEXT:    lw t2, 176(sp)
-; CHECK-RV32IF-NEXT:    lw t1, 180(sp)
-; CHECK-RV32IF-NEXT:    lw t0, 184(sp)
-; CHECK-RV32IF-NEXT:    lw ra, 188(sp)
+; CHECK-RV32IF-NEXT:    flw fs11, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs10, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs9, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs7, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs6, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs5, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs4, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs3, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs2, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs1, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs0, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft11, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft10, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft9, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft8, 60(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa7, 64(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa6, 68(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa4, 76(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa3, 80(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa2, 84(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa1, 88(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa0, 92(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft7, 96(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft6, 100(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft5, 104(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft4, 108(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft3, 112(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft2, 116(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft1, 120(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft0, 124(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t6, 128(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t5, 132(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t4, 136(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t3, 140(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a7, 144(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a6, 148(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a5, 152(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a4, 156(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a3, 160(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a2, 164(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a1, 168(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a0, 172(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t2, 176(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t1, 180(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t0, 184(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 192
 ; CHECK-RV32IF-NEXT:    mret
 ;
 ; CHECK-RV32IFD-LABEL: foo_double:
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -32
-; CHECK-RV32IFD-NEXT:    sw a0, 28(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft0, 16(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft1, 8(sp)
+; CHECK-RV32IFD-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft1, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(h)
 ; CHECK-RV32IFD-NEXT:    fld ft0, %lo(h)(a0)
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(i)
@@ -535,9 +535,9 @@ define void @foo_double() nounwind #0 {
 ; CHECK-RV32IFD-NEXT:    fadd.d ft0, ft0, ft1
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(g)
 ; CHECK-RV32IFD-NEXT:    fsd ft0, %lo(g)(a0)
-; CHECK-RV32IFD-NEXT:    fld ft1, 8(sp)
-; CHECK-RV32IFD-NEXT:    fld ft0, 16(sp)
-; CHECK-RV32IFD-NEXT:    lw a0, 28(sp)
+; CHECK-RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load double, double* @h
@@ -554,23 +554,23 @@ define void @foo_fp_double() nounwind #1 {
 ; CHECK-RV32-LABEL: foo_fp_double:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -80
-; CHECK-RV32-NEXT:    sw ra, 76(sp)
-; CHECK-RV32-NEXT:    sw t0, 72(sp)
-; CHECK-RV32-NEXT:    sw t1, 68(sp)
-; CHECK-RV32-NEXT:    sw t2, 64(sp)
-; CHECK-RV32-NEXT:    sw s0, 60(sp)
-; CHECK-RV32-NEXT:    sw a0, 56(sp)
-; CHECK-RV32-NEXT:    sw a1, 52(sp)
-; CHECK-RV32-NEXT:    sw a2, 48(sp)
-; CHECK-RV32-NEXT:    sw a3, 44(sp)
-; CHECK-RV32-NEXT:    sw a4, 40(sp)
-; CHECK-RV32-NEXT:    sw a5, 36(sp)
-; CHECK-RV32-NEXT:    sw a6, 32(sp)
-; CHECK-RV32-NEXT:    sw a7, 28(sp)
-; CHECK-RV32-NEXT:    sw t3, 24(sp)
-; CHECK-RV32-NEXT:    sw t4, 20(sp)
-; CHECK-RV32-NEXT:    sw t5, 16(sp)
-; CHECK-RV32-NEXT:    sw t6, 12(sp)
+; CHECK-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t0, 72(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t1, 68(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t2, 64(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s0, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a3, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a4, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a5, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a6, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a7, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t3, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t4, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t6, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    addi s0, sp, 80
 ; CHECK-RV32-NEXT:    lui a1, %hi(h)
 ; CHECK-RV32-NEXT:    lw a0, %lo(h)(a1)
@@ -578,82 +578,82 @@ define void @foo_fp_double() nounwind #1 {
 ; CHECK-RV32-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32-NEXT:    call __adddf3
+; CHECK-RV32-NEXT:    call __adddf3@plt
 ; CHECK-RV32-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32-NEXT:    sw a0, %lo(g)(a2)
-; CHECK-RV32-NEXT:    lw t6, 12(sp)
-; CHECK-RV32-NEXT:    lw t5, 16(sp)
-; CHECK-RV32-NEXT:    lw t4, 20(sp)
-; CHECK-RV32-NEXT:    lw t3, 24(sp)
-; CHECK-RV32-NEXT:    lw a7, 28(sp)
-; CHECK-RV32-NEXT:    lw a6, 32(sp)
-; CHECK-RV32-NEXT:    lw a5, 36(sp)
-; CHECK-RV32-NEXT:    lw a4, 40(sp)
-; CHECK-RV32-NEXT:    lw a3, 44(sp)
-; CHECK-RV32-NEXT:    lw a2, 48(sp)
-; CHECK-RV32-NEXT:    lw a1, 52(sp)
-; CHECK-RV32-NEXT:    lw a0, 56(sp)
-; CHECK-RV32-NEXT:    lw s0, 60(sp)
-; CHECK-RV32-NEXT:    lw t2, 64(sp)
-; CHECK-RV32-NEXT:    lw t1, 68(sp)
-; CHECK-RV32-NEXT:    lw t0, 72(sp)
-; CHECK-RV32-NEXT:    lw ra, 76(sp)
+; CHECK-RV32-NEXT:    lw t6, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t5, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t4, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t3, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a7, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a6, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a5, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a4, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a3, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw s0, 60(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t2, 64(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t1, 68(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t0, 72(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 80
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32IF-LABEL: foo_fp_double:
 ; CHECK-RV32IF:       # %bb.0:
 ; CHECK-RV32IF-NEXT:    addi sp, sp, -208
-; CHECK-RV32IF-NEXT:    sw ra, 204(sp)
-; CHECK-RV32IF-NEXT:    sw t0, 200(sp)
-; CHECK-RV32IF-NEXT:    sw t1, 196(sp)
-; CHECK-RV32IF-NEXT:    sw t2, 192(sp)
-; CHECK-RV32IF-NEXT:    sw s0, 188(sp)
-; CHECK-RV32IF-NEXT:    sw a0, 184(sp)
-; CHECK-RV32IF-NEXT:    sw a1, 180(sp)
-; CHECK-RV32IF-NEXT:    sw a2, 176(sp)
-; CHECK-RV32IF-NEXT:    sw a3, 172(sp)
-; CHECK-RV32IF-NEXT:    sw a4, 168(sp)
-; CHECK-RV32IF-NEXT:    sw a5, 164(sp)
-; CHECK-RV32IF-NEXT:    sw a6, 160(sp)
-; CHECK-RV32IF-NEXT:    sw a7, 156(sp)
-; CHECK-RV32IF-NEXT:    sw t3, 152(sp)
-; CHECK-RV32IF-NEXT:    sw t4, 148(sp)
-; CHECK-RV32IF-NEXT:    sw t5, 144(sp)
-; CHECK-RV32IF-NEXT:    sw t6, 140(sp)
-; CHECK-RV32IF-NEXT:    fsw ft0, 136(sp)
-; CHECK-RV32IF-NEXT:    fsw ft1, 132(sp)
-; CHECK-RV32IF-NEXT:    fsw ft2, 128(sp)
-; CHECK-RV32IF-NEXT:    fsw ft3, 124(sp)
-; CHECK-RV32IF-NEXT:    fsw ft4, 120(sp)
-; CHECK-RV32IF-NEXT:    fsw ft5, 116(sp)
-; CHECK-RV32IF-NEXT:    fsw ft6, 112(sp)
-; CHECK-RV32IF-NEXT:    fsw ft7, 108(sp)
-; CHECK-RV32IF-NEXT:    fsw fa0, 104(sp)
-; CHECK-RV32IF-NEXT:    fsw fa1, 100(sp)
-; CHECK-RV32IF-NEXT:    fsw fa2, 96(sp)
-; CHECK-RV32IF-NEXT:    fsw fa3, 92(sp)
-; CHECK-RV32IF-NEXT:    fsw fa4, 88(sp)
-; CHECK-RV32IF-NEXT:    fsw fa5, 84(sp)
-; CHECK-RV32IF-NEXT:    fsw fa6, 80(sp)
-; CHECK-RV32IF-NEXT:    fsw fa7, 76(sp)
-; CHECK-RV32IF-NEXT:    fsw ft8, 72(sp)
-; CHECK-RV32IF-NEXT:    fsw ft9, 68(sp)
-; CHECK-RV32IF-NEXT:    fsw ft10, 64(sp)
-; CHECK-RV32IF-NEXT:    fsw ft11, 60(sp)
-; CHECK-RV32IF-NEXT:    fsw fs0, 56(sp)
-; CHECK-RV32IF-NEXT:    fsw fs1, 52(sp)
-; CHECK-RV32IF-NEXT:    fsw fs2, 48(sp)
-; CHECK-RV32IF-NEXT:    fsw fs3, 44(sp)
-; CHECK-RV32IF-NEXT:    fsw fs4, 40(sp)
-; CHECK-RV32IF-NEXT:    fsw fs5, 36(sp)
-; CHECK-RV32IF-NEXT:    fsw fs6, 32(sp)
-; CHECK-RV32IF-NEXT:    fsw fs7, 28(sp)
-; CHECK-RV32IF-NEXT:    fsw fs8, 24(sp)
-; CHECK-RV32IF-NEXT:    fsw fs9, 20(sp)
-; CHECK-RV32IF-NEXT:    fsw fs10, 16(sp)
-; CHECK-RV32IF-NEXT:    fsw fs11, 12(sp)
+; CHECK-RV32IF-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t0, 200(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t1, 196(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t2, 192(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw s0, 188(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a0, 184(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a1, 180(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a2, 176(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a3, 172(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a4, 168(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a5, 164(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a6, 160(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw a7, 156(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t3, 152(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t4, 148(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t5, 144(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    sw t6, 140(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft0, 136(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft1, 132(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft2, 128(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft3, 124(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft4, 120(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft5, 116(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft6, 112(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft7, 108(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa0, 104(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa1, 100(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa2, 96(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa3, 92(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa4, 88(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa5, 84(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa6, 80(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa7, 76(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft8, 72(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft9, 68(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft10, 64(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw ft11, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs3, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs4, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs5, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs6, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs7, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs8, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs9, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs10, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fs11, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    addi s0, sp, 208
 ; CHECK-RV32IF-NEXT:    lui a1, %hi(h)
 ; CHECK-RV32IF-NEXT:    lw a0, %lo(h)(a1)
@@ -661,70 +661,70 @@ define void @foo_fp_double() nounwind #1 {
 ; CHECK-RV32IF-NEXT:    lui a3, %hi(i)
 ; CHECK-RV32IF-NEXT:    lw a2, %lo(i)(a3)
 ; CHECK-RV32IF-NEXT:    lw a3, %lo(i+4)(a3)
-; CHECK-RV32IF-NEXT:    call __adddf3
+; CHECK-RV32IF-NEXT:    call __adddf3@plt
 ; CHECK-RV32IF-NEXT:    lui a2, %hi(g)
 ; CHECK-RV32IF-NEXT:    sw a1, %lo(g+4)(a2)
 ; CHECK-RV32IF-NEXT:    sw a0, %lo(g)(a2)
-; CHECK-RV32IF-NEXT:    flw fs11, 12(sp)
-; CHECK-RV32IF-NEXT:    flw fs10, 16(sp)
-; CHECK-RV32IF-NEXT:    flw fs9, 20(sp)
-; CHECK-RV32IF-NEXT:    flw fs8, 24(sp)
-; CHECK-RV32IF-NEXT:    flw fs7, 28(sp)
-; CHECK-RV32IF-NEXT:    flw fs6, 32(sp)
-; CHECK-RV32IF-NEXT:    flw fs5, 36(sp)
-; CHECK-RV32IF-NEXT:    flw fs4, 40(sp)
-; CHECK-RV32IF-NEXT:    flw fs3, 44(sp)
-; CHECK-RV32IF-NEXT:    flw fs2, 48(sp)
-; CHECK-RV32IF-NEXT:    flw fs1, 52(sp)
-; CHECK-RV32IF-NEXT:    flw fs0, 56(sp)
-; CHECK-RV32IF-NEXT:    flw ft11, 60(sp)
-; CHECK-RV32IF-NEXT:    flw ft10, 64(sp)
-; CHECK-RV32IF-NEXT:    flw ft9, 68(sp)
-; CHECK-RV32IF-NEXT:    flw ft8, 72(sp)
-; CHECK-RV32IF-NEXT:    flw fa7, 76(sp)
-; CHECK-RV32IF-NEXT:    flw fa6, 80(sp)
-; CHECK-RV32IF-NEXT:    flw fa5, 84(sp)
-; CHECK-RV32IF-NEXT:    flw fa4, 88(sp)
-; CHECK-RV32IF-NEXT:    flw fa3, 92(sp)
-; CHECK-RV32IF-NEXT:    flw fa2, 96(sp)
-; CHECK-RV32IF-NEXT:    flw fa1, 100(sp)
-; CHECK-RV32IF-NEXT:    flw fa0, 104(sp)
-; CHECK-RV32IF-NEXT:    flw ft7, 108(sp)
-; CHECK-RV32IF-NEXT:    flw ft6, 112(sp)
-; CHECK-RV32IF-NEXT:    flw ft5, 116(sp)
-; CHECK-RV32IF-NEXT:    flw ft4, 120(sp)
-; CHECK-RV32IF-NEXT:    flw ft3, 124(sp)
-; CHECK-RV32IF-NEXT:    flw ft2, 128(sp)
-; CHECK-RV32IF-NEXT:    flw ft1, 132(sp)
-; CHECK-RV32IF-NEXT:    flw ft0, 136(sp)
-; CHECK-RV32IF-NEXT:    lw t6, 140(sp)
-; CHECK-RV32IF-NEXT:    lw t5, 144(sp)
-; CHECK-RV32IF-NEXT:    lw t4, 148(sp)
-; CHECK-RV32IF-NEXT:    lw t3, 152(sp)
-; CHECK-RV32IF-NEXT:    lw a7, 156(sp)
-; CHECK-RV32IF-NEXT:    lw a6, 160(sp)
-; CHECK-RV32IF-NEXT:    lw a5, 164(sp)
-; CHECK-RV32IF-NEXT:    lw a4, 168(sp)
-; CHECK-RV32IF-NEXT:    lw a3, 172(sp)
-; CHECK-RV32IF-NEXT:    lw a2, 176(sp)
-; CHECK-RV32IF-NEXT:    lw a1, 180(sp)
-; CHECK-RV32IF-NEXT:    lw a0, 184(sp)
-; CHECK-RV32IF-NEXT:    lw s0, 188(sp)
-; CHECK-RV32IF-NEXT:    lw t2, 192(sp)
-; CHECK-RV32IF-NEXT:    lw t1, 196(sp)
-; CHECK-RV32IF-NEXT:    lw t0, 200(sp)
-; CHECK-RV32IF-NEXT:    lw ra, 204(sp)
+; CHECK-RV32IF-NEXT:    flw fs11, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs10, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs9, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs8, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs7, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs6, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs5, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs4, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs3, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fs0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft11, 60(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft10, 64(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft9, 68(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft8, 72(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa7, 76(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa6, 80(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa5, 84(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa4, 88(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa3, 92(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa2, 96(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa1, 100(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa0, 104(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft7, 108(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft6, 112(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft5, 116(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft4, 120(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft3, 124(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft2, 128(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft1, 132(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw ft0, 136(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t6, 140(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t5, 144(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t4, 148(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t3, 152(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a7, 156(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a6, 160(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a5, 164(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a4, 168(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a3, 172(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a2, 176(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a1, 180(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw a0, 184(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw s0, 188(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t2, 192(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t1, 196(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw t0, 200(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 208
 ; CHECK-RV32IF-NEXT:    mret
 ;
 ; CHECK-RV32IFD-LABEL: foo_fp_double:
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -32
-; CHECK-RV32IFD-NEXT:    sw ra, 28(sp)
-; CHECK-RV32IFD-NEXT:    sw s0, 24(sp)
-; CHECK-RV32IFD-NEXT:    sw a0, 20(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft0, 8(sp)
-; CHECK-RV32IFD-NEXT:    fsd ft1, 0(sp)
+; CHECK-RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd ft1, 0(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    addi s0, sp, 32
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(h)
 ; CHECK-RV32IFD-NEXT:    fld ft0, %lo(h)(a0)
@@ -733,11 +733,11 @@ define void @foo_fp_double() nounwind #1 {
 ; CHECK-RV32IFD-NEXT:    fadd.d ft0, ft0, ft1
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(g)
 ; CHECK-RV32IFD-NEXT:    fsd ft0, %lo(g)(a0)
-; CHECK-RV32IFD-NEXT:    fld ft1, 0(sp)
-; CHECK-RV32IFD-NEXT:    fld ft0, 8(sp)
-; CHECK-RV32IFD-NEXT:    lw a0, 20(sp)
-; CHECK-RV32IFD-NEXT:    lw s0, 24(sp)
-; CHECK-RV32IFD-NEXT:    lw ra, 28(sp)
+; CHECK-RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load double, double* @h

diff  --git a/llvm/test/CodeGen/RISCV/interrupt-attr.ll b/llvm/test/CodeGen/RISCV/interrupt-attr.ll
index 040521a8edf5..8300a5b8ca96 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr.ll
@@ -54,490 +54,490 @@ define void @foo_with_call() #2 {
 ; CHECK-RV32-LABEL: foo_with_call:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -64
-; CHECK-RV32-NEXT:    sw ra, 60(sp)
-; CHECK-RV32-NEXT:    sw t0, 56(sp)
-; CHECK-RV32-NEXT:    sw t1, 52(sp)
-; CHECK-RV32-NEXT:    sw t2, 48(sp)
-; CHECK-RV32-NEXT:    sw a0, 44(sp)
-; CHECK-RV32-NEXT:    sw a1, 40(sp)
-; CHECK-RV32-NEXT:    sw a2, 36(sp)
-; CHECK-RV32-NEXT:    sw a3, 32(sp)
-; CHECK-RV32-NEXT:    sw a4, 28(sp)
-; CHECK-RV32-NEXT:    sw a5, 24(sp)
-; CHECK-RV32-NEXT:    sw a6, 20(sp)
-; CHECK-RV32-NEXT:    sw a7, 16(sp)
-; CHECK-RV32-NEXT:    sw t3, 12(sp)
-; CHECK-RV32-NEXT:    sw t4, 8(sp)
-; CHECK-RV32-NEXT:    sw t5, 4(sp)
-; CHECK-RV32-NEXT:    sw t6, 0(sp)
-; CHECK-RV32-NEXT:    call otherfoo
-; CHECK-RV32-NEXT:    lw t6, 0(sp)
-; CHECK-RV32-NEXT:    lw t5, 4(sp)
-; CHECK-RV32-NEXT:    lw t4, 8(sp)
-; CHECK-RV32-NEXT:    lw t3, 12(sp)
-; CHECK-RV32-NEXT:    lw a7, 16(sp)
-; CHECK-RV32-NEXT:    lw a6, 20(sp)
-; CHECK-RV32-NEXT:    lw a5, 24(sp)
-; CHECK-RV32-NEXT:    lw a4, 28(sp)
-; CHECK-RV32-NEXT:    lw a3, 32(sp)
-; CHECK-RV32-NEXT:    lw a2, 36(sp)
-; CHECK-RV32-NEXT:    lw a1, 40(sp)
-; CHECK-RV32-NEXT:    lw a0, 44(sp)
-; CHECK-RV32-NEXT:    lw t2, 48(sp)
-; CHECK-RV32-NEXT:    lw t1, 52(sp)
-; CHECK-RV32-NEXT:    lw t0, 56(sp)
-; CHECK-RV32-NEXT:    lw ra, 60(sp)
+; CHECK-RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a2, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a3, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a4, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a6, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a7, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t3, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t4, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t5, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t6, 0(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    call otherfoo@plt
+; CHECK-RV32-NEXT:    lw t6, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t5, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t4, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t3, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a7, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a6, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a1, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 64
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32-F-LABEL: foo_with_call:
 ; CHECK-RV32-F:       # %bb.0:
 ; CHECK-RV32-F-NEXT:    addi sp, sp, -192
-; CHECK-RV32-F-NEXT:    sw ra, 188(sp)
-; CHECK-RV32-F-NEXT:    sw t0, 184(sp)
-; CHECK-RV32-F-NEXT:    sw t1, 180(sp)
-; CHECK-RV32-F-NEXT:    sw t2, 176(sp)
-; CHECK-RV32-F-NEXT:    sw a0, 172(sp)
-; CHECK-RV32-F-NEXT:    sw a1, 168(sp)
-; CHECK-RV32-F-NEXT:    sw a2, 164(sp)
-; CHECK-RV32-F-NEXT:    sw a3, 160(sp)
-; CHECK-RV32-F-NEXT:    sw a4, 156(sp)
-; CHECK-RV32-F-NEXT:    sw a5, 152(sp)
-; CHECK-RV32-F-NEXT:    sw a6, 148(sp)
-; CHECK-RV32-F-NEXT:    sw a7, 144(sp)
-; CHECK-RV32-F-NEXT:    sw t3, 140(sp)
-; CHECK-RV32-F-NEXT:    sw t4, 136(sp)
-; CHECK-RV32-F-NEXT:    sw t5, 132(sp)
-; CHECK-RV32-F-NEXT:    sw t6, 128(sp)
-; CHECK-RV32-F-NEXT:    fsw ft0, 124(sp)
-; CHECK-RV32-F-NEXT:    fsw ft1, 120(sp)
-; CHECK-RV32-F-NEXT:    fsw ft2, 116(sp)
-; CHECK-RV32-F-NEXT:    fsw ft3, 112(sp)
-; CHECK-RV32-F-NEXT:    fsw ft4, 108(sp)
-; CHECK-RV32-F-NEXT:    fsw ft5, 104(sp)
-; CHECK-RV32-F-NEXT:    fsw ft6, 100(sp)
-; CHECK-RV32-F-NEXT:    fsw ft7, 96(sp)
-; CHECK-RV32-F-NEXT:    fsw fa0, 92(sp)
-; CHECK-RV32-F-NEXT:    fsw fa1, 88(sp)
-; CHECK-RV32-F-NEXT:    fsw fa2, 84(sp)
-; CHECK-RV32-F-NEXT:    fsw fa3, 80(sp)
-; CHECK-RV32-F-NEXT:    fsw fa4, 76(sp)
-; CHECK-RV32-F-NEXT:    fsw fa5, 72(sp)
-; CHECK-RV32-F-NEXT:    fsw fa6, 68(sp)
-; CHECK-RV32-F-NEXT:    fsw fa7, 64(sp)
-; CHECK-RV32-F-NEXT:    fsw ft8, 60(sp)
-; CHECK-RV32-F-NEXT:    fsw ft9, 56(sp)
-; CHECK-RV32-F-NEXT:    fsw ft10, 52(sp)
-; CHECK-RV32-F-NEXT:    fsw ft11, 48(sp)
-; CHECK-RV32-F-NEXT:    fsw fs0, 44(sp)
-; CHECK-RV32-F-NEXT:    fsw fs1, 40(sp)
-; CHECK-RV32-F-NEXT:    fsw fs2, 36(sp)
-; CHECK-RV32-F-NEXT:    fsw fs3, 32(sp)
-; CHECK-RV32-F-NEXT:    fsw fs4, 28(sp)
-; CHECK-RV32-F-NEXT:    fsw fs5, 24(sp)
-; CHECK-RV32-F-NEXT:    fsw fs6, 20(sp)
-; CHECK-RV32-F-NEXT:    fsw fs7, 16(sp)
-; CHECK-RV32-F-NEXT:    fsw fs8, 12(sp)
-; CHECK-RV32-F-NEXT:    fsw fs9, 8(sp)
-; CHECK-RV32-F-NEXT:    fsw fs10, 4(sp)
-; CHECK-RV32-F-NEXT:    fsw fs11, 0(sp)
-; CHECK-RV32-F-NEXT:    call otherfoo
-; CHECK-RV32-F-NEXT:    flw fs11, 0(sp)
-; CHECK-RV32-F-NEXT:    flw fs10, 4(sp)
-; CHECK-RV32-F-NEXT:    flw fs9, 8(sp)
-; CHECK-RV32-F-NEXT:    flw fs8, 12(sp)
-; CHECK-RV32-F-NEXT:    flw fs7, 16(sp)
-; CHECK-RV32-F-NEXT:    flw fs6, 20(sp)
-; CHECK-RV32-F-NEXT:    flw fs5, 24(sp)
-; CHECK-RV32-F-NEXT:    flw fs4, 28(sp)
-; CHECK-RV32-F-NEXT:    flw fs3, 32(sp)
-; CHECK-RV32-F-NEXT:    flw fs2, 36(sp)
-; CHECK-RV32-F-NEXT:    flw fs1, 40(sp)
-; CHECK-RV32-F-NEXT:    flw fs0, 44(sp)
-; CHECK-RV32-F-NEXT:    flw ft11, 48(sp)
-; CHECK-RV32-F-NEXT:    flw ft10, 52(sp)
-; CHECK-RV32-F-NEXT:    flw ft9, 56(sp)
-; CHECK-RV32-F-NEXT:    flw ft8, 60(sp)
-; CHECK-RV32-F-NEXT:    flw fa7, 64(sp)
-; CHECK-RV32-F-NEXT:    flw fa6, 68(sp)
-; CHECK-RV32-F-NEXT:    flw fa5, 72(sp)
-; CHECK-RV32-F-NEXT:    flw fa4, 76(sp)
-; CHECK-RV32-F-NEXT:    flw fa3, 80(sp)
-; CHECK-RV32-F-NEXT:    flw fa2, 84(sp)
-; CHECK-RV32-F-NEXT:    flw fa1, 88(sp)
-; CHECK-RV32-F-NEXT:    flw fa0, 92(sp)
-; CHECK-RV32-F-NEXT:    flw ft7, 96(sp)
-; CHECK-RV32-F-NEXT:    flw ft6, 100(sp)
-; CHECK-RV32-F-NEXT:    flw ft5, 104(sp)
-; CHECK-RV32-F-NEXT:    flw ft4, 108(sp)
-; CHECK-RV32-F-NEXT:    flw ft3, 112(sp)
-; CHECK-RV32-F-NEXT:    flw ft2, 116(sp)
-; CHECK-RV32-F-NEXT:    flw ft1, 120(sp)
-; CHECK-RV32-F-NEXT:    flw ft0, 124(sp)
-; CHECK-RV32-F-NEXT:    lw t6, 128(sp)
-; CHECK-RV32-F-NEXT:    lw t5, 132(sp)
-; CHECK-RV32-F-NEXT:    lw t4, 136(sp)
-; CHECK-RV32-F-NEXT:    lw t3, 140(sp)
-; CHECK-RV32-F-NEXT:    lw a7, 144(sp)
-; CHECK-RV32-F-NEXT:    lw a6, 148(sp)
-; CHECK-RV32-F-NEXT:    lw a5, 152(sp)
-; CHECK-RV32-F-NEXT:    lw a4, 156(sp)
-; CHECK-RV32-F-NEXT:    lw a3, 160(sp)
-; CHECK-RV32-F-NEXT:    lw a2, 164(sp)
-; CHECK-RV32-F-NEXT:    lw a1, 168(sp)
-; CHECK-RV32-F-NEXT:    lw a0, 172(sp)
-; CHECK-RV32-F-NEXT:    lw t2, 176(sp)
-; CHECK-RV32-F-NEXT:    lw t1, 180(sp)
-; CHECK-RV32-F-NEXT:    lw t0, 184(sp)
-; CHECK-RV32-F-NEXT:    lw ra, 188(sp)
+; CHECK-RV32-F-NEXT:    sw ra, 188(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t0, 184(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t1, 180(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t2, 176(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a0, 172(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a1, 168(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a2, 164(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a3, 160(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a4, 156(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a5, 152(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a6, 148(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a7, 144(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t3, 140(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t4, 136(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t5, 132(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t6, 128(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft0, 124(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft1, 120(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft2, 116(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft3, 112(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft4, 108(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft5, 104(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft6, 100(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft7, 96(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa0, 92(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa1, 88(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa2, 84(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa3, 80(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa4, 76(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa6, 68(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa7, 64(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft8, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft9, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft10, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft11, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs0, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs1, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs2, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs3, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs4, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs5, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs6, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs7, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs9, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs10, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs11, 0(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    call otherfoo@plt
+; CHECK-RV32-F-NEXT:    flw fs11, 0(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs10, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs9, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs7, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs6, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs5, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs4, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs3, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs2, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs1, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs0, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft11, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft10, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft9, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft8, 60(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa7, 64(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa6, 68(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa4, 76(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa3, 80(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa2, 84(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa1, 88(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa0, 92(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft7, 96(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft6, 100(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft5, 104(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft4, 108(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft3, 112(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft2, 116(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft1, 120(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft0, 124(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t6, 128(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t5, 132(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t4, 136(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t3, 140(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a7, 144(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a6, 148(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a5, 152(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a4, 156(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a3, 160(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a2, 164(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a1, 168(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a0, 172(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t2, 176(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t1, 180(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t0, 184(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    addi sp, sp, 192
 ; CHECK-RV32-F-NEXT:    mret
 ;
 ; CHECK-RV32-FD-LABEL: foo_with_call:
 ; CHECK-RV32-FD:       # %bb.0:
 ; CHECK-RV32-FD-NEXT:    addi sp, sp, -320
-; CHECK-RV32-FD-NEXT:    sw ra, 316(sp)
-; CHECK-RV32-FD-NEXT:    sw t0, 312(sp)
-; CHECK-RV32-FD-NEXT:    sw t1, 308(sp)
-; CHECK-RV32-FD-NEXT:    sw t2, 304(sp)
-; CHECK-RV32-FD-NEXT:    sw a0, 300(sp)
-; CHECK-RV32-FD-NEXT:    sw a1, 296(sp)
-; CHECK-RV32-FD-NEXT:    sw a2, 292(sp)
-; CHECK-RV32-FD-NEXT:    sw a3, 288(sp)
-; CHECK-RV32-FD-NEXT:    sw a4, 284(sp)
-; CHECK-RV32-FD-NEXT:    sw a5, 280(sp)
-; CHECK-RV32-FD-NEXT:    sw a6, 276(sp)
-; CHECK-RV32-FD-NEXT:    sw a7, 272(sp)
-; CHECK-RV32-FD-NEXT:    sw t3, 268(sp)
-; CHECK-RV32-FD-NEXT:    sw t4, 264(sp)
-; CHECK-RV32-FD-NEXT:    sw t5, 260(sp)
-; CHECK-RV32-FD-NEXT:    sw t6, 256(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft0, 248(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft1, 240(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft2, 232(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft3, 224(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft4, 216(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft5, 208(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft6, 200(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft7, 192(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa0, 184(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa1, 176(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa2, 168(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa3, 160(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa4, 152(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa5, 144(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa6, 136(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa7, 128(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft8, 120(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft9, 112(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft10, 104(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft11, 96(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs0, 88(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs1, 80(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs2, 72(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs3, 64(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs4, 56(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs5, 48(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs6, 40(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs7, 32(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs8, 24(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs9, 16(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs10, 8(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs11, 0(sp)
-; CHECK-RV32-FD-NEXT:    call otherfoo
-; CHECK-RV32-FD-NEXT:    fld fs11, 0(sp)
-; CHECK-RV32-FD-NEXT:    fld fs10, 8(sp)
-; CHECK-RV32-FD-NEXT:    fld fs9, 16(sp)
-; CHECK-RV32-FD-NEXT:    fld fs8, 24(sp)
-; CHECK-RV32-FD-NEXT:    fld fs7, 32(sp)
-; CHECK-RV32-FD-NEXT:    fld fs6, 40(sp)
-; CHECK-RV32-FD-NEXT:    fld fs5, 48(sp)
-; CHECK-RV32-FD-NEXT:    fld fs4, 56(sp)
-; CHECK-RV32-FD-NEXT:    fld fs3, 64(sp)
-; CHECK-RV32-FD-NEXT:    fld fs2, 72(sp)
-; CHECK-RV32-FD-NEXT:    fld fs1, 80(sp)
-; CHECK-RV32-FD-NEXT:    fld fs0, 88(sp)
-; CHECK-RV32-FD-NEXT:    fld ft11, 96(sp)
-; CHECK-RV32-FD-NEXT:    fld ft10, 104(sp)
-; CHECK-RV32-FD-NEXT:    fld ft9, 112(sp)
-; CHECK-RV32-FD-NEXT:    fld ft8, 120(sp)
-; CHECK-RV32-FD-NEXT:    fld fa7, 128(sp)
-; CHECK-RV32-FD-NEXT:    fld fa6, 136(sp)
-; CHECK-RV32-FD-NEXT:    fld fa5, 144(sp)
-; CHECK-RV32-FD-NEXT:    fld fa4, 152(sp)
-; CHECK-RV32-FD-NEXT:    fld fa3, 160(sp)
-; CHECK-RV32-FD-NEXT:    fld fa2, 168(sp)
-; CHECK-RV32-FD-NEXT:    fld fa1, 176(sp)
-; CHECK-RV32-FD-NEXT:    fld fa0, 184(sp)
-; CHECK-RV32-FD-NEXT:    fld ft7, 192(sp)
-; CHECK-RV32-FD-NEXT:    fld ft6, 200(sp)
-; CHECK-RV32-FD-NEXT:    fld ft5, 208(sp)
-; CHECK-RV32-FD-NEXT:    fld ft4, 216(sp)
-; CHECK-RV32-FD-NEXT:    fld ft3, 224(sp)
-; CHECK-RV32-FD-NEXT:    fld ft2, 232(sp)
-; CHECK-RV32-FD-NEXT:    fld ft1, 240(sp)
-; CHECK-RV32-FD-NEXT:    fld ft0, 248(sp)
-; CHECK-RV32-FD-NEXT:    lw t6, 256(sp)
-; CHECK-RV32-FD-NEXT:    lw t5, 260(sp)
-; CHECK-RV32-FD-NEXT:    lw t4, 264(sp)
-; CHECK-RV32-FD-NEXT:    lw t3, 268(sp)
-; CHECK-RV32-FD-NEXT:    lw a7, 272(sp)
-; CHECK-RV32-FD-NEXT:    lw a6, 276(sp)
-; CHECK-RV32-FD-NEXT:    lw a5, 280(sp)
-; CHECK-RV32-FD-NEXT:    lw a4, 284(sp)
-; CHECK-RV32-FD-NEXT:    lw a3, 288(sp)
-; CHECK-RV32-FD-NEXT:    lw a2, 292(sp)
-; CHECK-RV32-FD-NEXT:    lw a1, 296(sp)
-; CHECK-RV32-FD-NEXT:    lw a0, 300(sp)
-; CHECK-RV32-FD-NEXT:    lw t2, 304(sp)
-; CHECK-RV32-FD-NEXT:    lw t1, 308(sp)
-; CHECK-RV32-FD-NEXT:    lw t0, 312(sp)
-; CHECK-RV32-FD-NEXT:    lw ra, 316(sp)
+; CHECK-RV32-FD-NEXT:    sw ra, 316(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t0, 312(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t1, 308(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t2, 304(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a0, 300(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a1, 296(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a2, 292(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a3, 288(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a4, 284(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a5, 280(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a6, 276(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a7, 272(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t3, 268(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t4, 264(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t5, 260(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t6, 256(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft0, 248(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft1, 240(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft2, 232(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft3, 224(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft4, 216(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft5, 208(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft6, 200(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft7, 192(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa0, 184(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa1, 176(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa2, 168(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa3, 160(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa4, 152(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa5, 144(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa6, 136(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa7, 128(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft8, 120(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft9, 112(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft10, 104(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft11, 96(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs0, 88(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs1, 80(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs2, 72(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs3, 64(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs4, 56(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs5, 48(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs6, 40(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs7, 32(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs8, 24(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs9, 16(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    call otherfoo@plt
+; CHECK-RV32-FD-NEXT:    fld fs11, 0(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs9, 16(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs8, 24(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs7, 32(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs6, 40(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs5, 48(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs4, 56(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs3, 64(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft11, 96(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft10, 104(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft9, 112(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft8, 120(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa7, 128(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa6, 136(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa5, 144(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa4, 152(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa3, 160(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa2, 168(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa1, 176(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa0, 184(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft7, 192(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft6, 200(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft5, 208(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft4, 216(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft3, 224(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft2, 232(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft1, 240(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft0, 248(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t6, 256(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t5, 260(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t4, 264(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t3, 268(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a7, 272(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a6, 276(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a5, 280(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a4, 284(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a3, 288(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a2, 292(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a1, 296(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a0, 300(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t2, 304(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t1, 308(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t0, 312(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw ra, 316(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    addi sp, sp, 320
 ; CHECK-RV32-FD-NEXT:    mret
 ;
 ; CHECK-RV64-LABEL: foo_with_call:
 ; CHECK-RV64:       # %bb.0:
 ; CHECK-RV64-NEXT:    addi sp, sp, -128
-; CHECK-RV64-NEXT:    sd ra, 120(sp)
-; CHECK-RV64-NEXT:    sd t0, 112(sp)
-; CHECK-RV64-NEXT:    sd t1, 104(sp)
-; CHECK-RV64-NEXT:    sd t2, 96(sp)
-; CHECK-RV64-NEXT:    sd a0, 88(sp)
-; CHECK-RV64-NEXT:    sd a1, 80(sp)
-; CHECK-RV64-NEXT:    sd a2, 72(sp)
-; CHECK-RV64-NEXT:    sd a3, 64(sp)
-; CHECK-RV64-NEXT:    sd a4, 56(sp)
-; CHECK-RV64-NEXT:    sd a5, 48(sp)
-; CHECK-RV64-NEXT:    sd a6, 40(sp)
-; CHECK-RV64-NEXT:    sd a7, 32(sp)
-; CHECK-RV64-NEXT:    sd t3, 24(sp)
-; CHECK-RV64-NEXT:    sd t4, 16(sp)
-; CHECK-RV64-NEXT:    sd t5, 8(sp)
-; CHECK-RV64-NEXT:    sd t6, 0(sp)
-; CHECK-RV64-NEXT:    call otherfoo
-; CHECK-RV64-NEXT:    ld t6, 0(sp)
-; CHECK-RV64-NEXT:    ld t5, 8(sp)
-; CHECK-RV64-NEXT:    ld t4, 16(sp)
-; CHECK-RV64-NEXT:    ld t3, 24(sp)
-; CHECK-RV64-NEXT:    ld a7, 32(sp)
-; CHECK-RV64-NEXT:    ld a6, 40(sp)
-; CHECK-RV64-NEXT:    ld a5, 48(sp)
-; CHECK-RV64-NEXT:    ld a4, 56(sp)
-; CHECK-RV64-NEXT:    ld a3, 64(sp)
-; CHECK-RV64-NEXT:    ld a2, 72(sp)
-; CHECK-RV64-NEXT:    ld a1, 80(sp)
-; CHECK-RV64-NEXT:    ld a0, 88(sp)
-; CHECK-RV64-NEXT:    ld t2, 96(sp)
-; CHECK-RV64-NEXT:    ld t1, 104(sp)
-; CHECK-RV64-NEXT:    ld t0, 112(sp)
-; CHECK-RV64-NEXT:    ld ra, 120(sp)
+; CHECK-RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t0, 112(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t1, 104(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t2, 96(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a0, 88(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a1, 80(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a2, 72(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a3, 64(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a4, 56(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a5, 48(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a6, 40(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a7, 32(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t3, 24(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t4, 16(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t5, 8(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t6, 0(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    call otherfoo@plt
+; CHECK-RV64-NEXT:    ld t6, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t5, 8(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t4, 16(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t3, 24(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a7, 32(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a6, 40(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a5, 48(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a4, 56(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a3, 64(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a2, 72(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a1, 80(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a0, 88(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t2, 96(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t1, 104(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t0, 112(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    addi sp, sp, 128
 ; CHECK-RV64-NEXT:    mret
 ;
 ; CHECK-RV64-F-LABEL: foo_with_call:
 ; CHECK-RV64-F:       # %bb.0:
 ; CHECK-RV64-F-NEXT:    addi sp, sp, -256
-; CHECK-RV64-F-NEXT:    sd ra, 248(sp)
-; CHECK-RV64-F-NEXT:    sd t0, 240(sp)
-; CHECK-RV64-F-NEXT:    sd t1, 232(sp)
-; CHECK-RV64-F-NEXT:    sd t2, 224(sp)
-; CHECK-RV64-F-NEXT:    sd a0, 216(sp)
-; CHECK-RV64-F-NEXT:    sd a1, 208(sp)
-; CHECK-RV64-F-NEXT:    sd a2, 200(sp)
-; CHECK-RV64-F-NEXT:    sd a3, 192(sp)
-; CHECK-RV64-F-NEXT:    sd a4, 184(sp)
-; CHECK-RV64-F-NEXT:    sd a5, 176(sp)
-; CHECK-RV64-F-NEXT:    sd a6, 168(sp)
-; CHECK-RV64-F-NEXT:    sd a7, 160(sp)
-; CHECK-RV64-F-NEXT:    sd t3, 152(sp)
-; CHECK-RV64-F-NEXT:    sd t4, 144(sp)
-; CHECK-RV64-F-NEXT:    sd t5, 136(sp)
-; CHECK-RV64-F-NEXT:    sd t6, 128(sp)
-; CHECK-RV64-F-NEXT:    fsw ft0, 124(sp)
-; CHECK-RV64-F-NEXT:    fsw ft1, 120(sp)
-; CHECK-RV64-F-NEXT:    fsw ft2, 116(sp)
-; CHECK-RV64-F-NEXT:    fsw ft3, 112(sp)
-; CHECK-RV64-F-NEXT:    fsw ft4, 108(sp)
-; CHECK-RV64-F-NEXT:    fsw ft5, 104(sp)
-; CHECK-RV64-F-NEXT:    fsw ft6, 100(sp)
-; CHECK-RV64-F-NEXT:    fsw ft7, 96(sp)
-; CHECK-RV64-F-NEXT:    fsw fa0, 92(sp)
-; CHECK-RV64-F-NEXT:    fsw fa1, 88(sp)
-; CHECK-RV64-F-NEXT:    fsw fa2, 84(sp)
-; CHECK-RV64-F-NEXT:    fsw fa3, 80(sp)
-; CHECK-RV64-F-NEXT:    fsw fa4, 76(sp)
-; CHECK-RV64-F-NEXT:    fsw fa5, 72(sp)
-; CHECK-RV64-F-NEXT:    fsw fa6, 68(sp)
-; CHECK-RV64-F-NEXT:    fsw fa7, 64(sp)
-; CHECK-RV64-F-NEXT:    fsw ft8, 60(sp)
-; CHECK-RV64-F-NEXT:    fsw ft9, 56(sp)
-; CHECK-RV64-F-NEXT:    fsw ft10, 52(sp)
-; CHECK-RV64-F-NEXT:    fsw ft11, 48(sp)
-; CHECK-RV64-F-NEXT:    fsw fs0, 44(sp)
-; CHECK-RV64-F-NEXT:    fsw fs1, 40(sp)
-; CHECK-RV64-F-NEXT:    fsw fs2, 36(sp)
-; CHECK-RV64-F-NEXT:    fsw fs3, 32(sp)
-; CHECK-RV64-F-NEXT:    fsw fs4, 28(sp)
-; CHECK-RV64-F-NEXT:    fsw fs5, 24(sp)
-; CHECK-RV64-F-NEXT:    fsw fs6, 20(sp)
-; CHECK-RV64-F-NEXT:    fsw fs7, 16(sp)
-; CHECK-RV64-F-NEXT:    fsw fs8, 12(sp)
-; CHECK-RV64-F-NEXT:    fsw fs9, 8(sp)
-; CHECK-RV64-F-NEXT:    fsw fs10, 4(sp)
-; CHECK-RV64-F-NEXT:    fsw fs11, 0(sp)
-; CHECK-RV64-F-NEXT:    call otherfoo
-; CHECK-RV64-F-NEXT:    flw fs11, 0(sp)
-; CHECK-RV64-F-NEXT:    flw fs10, 4(sp)
-; CHECK-RV64-F-NEXT:    flw fs9, 8(sp)
-; CHECK-RV64-F-NEXT:    flw fs8, 12(sp)
-; CHECK-RV64-F-NEXT:    flw fs7, 16(sp)
-; CHECK-RV64-F-NEXT:    flw fs6, 20(sp)
-; CHECK-RV64-F-NEXT:    flw fs5, 24(sp)
-; CHECK-RV64-F-NEXT:    flw fs4, 28(sp)
-; CHECK-RV64-F-NEXT:    flw fs3, 32(sp)
-; CHECK-RV64-F-NEXT:    flw fs2, 36(sp)
-; CHECK-RV64-F-NEXT:    flw fs1, 40(sp)
-; CHECK-RV64-F-NEXT:    flw fs0, 44(sp)
-; CHECK-RV64-F-NEXT:    flw ft11, 48(sp)
-; CHECK-RV64-F-NEXT:    flw ft10, 52(sp)
-; CHECK-RV64-F-NEXT:    flw ft9, 56(sp)
-; CHECK-RV64-F-NEXT:    flw ft8, 60(sp)
-; CHECK-RV64-F-NEXT:    flw fa7, 64(sp)
-; CHECK-RV64-F-NEXT:    flw fa6, 68(sp)
-; CHECK-RV64-F-NEXT:    flw fa5, 72(sp)
-; CHECK-RV64-F-NEXT:    flw fa4, 76(sp)
-; CHECK-RV64-F-NEXT:    flw fa3, 80(sp)
-; CHECK-RV64-F-NEXT:    flw fa2, 84(sp)
-; CHECK-RV64-F-NEXT:    flw fa1, 88(sp)
-; CHECK-RV64-F-NEXT:    flw fa0, 92(sp)
-; CHECK-RV64-F-NEXT:    flw ft7, 96(sp)
-; CHECK-RV64-F-NEXT:    flw ft6, 100(sp)
-; CHECK-RV64-F-NEXT:    flw ft5, 104(sp)
-; CHECK-RV64-F-NEXT:    flw ft4, 108(sp)
-; CHECK-RV64-F-NEXT:    flw ft3, 112(sp)
-; CHECK-RV64-F-NEXT:    flw ft2, 116(sp)
-; CHECK-RV64-F-NEXT:    flw ft1, 120(sp)
-; CHECK-RV64-F-NEXT:    flw ft0, 124(sp)
-; CHECK-RV64-F-NEXT:    ld t6, 128(sp)
-; CHECK-RV64-F-NEXT:    ld t5, 136(sp)
-; CHECK-RV64-F-NEXT:    ld t4, 144(sp)
-; CHECK-RV64-F-NEXT:    ld t3, 152(sp)
-; CHECK-RV64-F-NEXT:    ld a7, 160(sp)
-; CHECK-RV64-F-NEXT:    ld a6, 168(sp)
-; CHECK-RV64-F-NEXT:    ld a5, 176(sp)
-; CHECK-RV64-F-NEXT:    ld a4, 184(sp)
-; CHECK-RV64-F-NEXT:    ld a3, 192(sp)
-; CHECK-RV64-F-NEXT:    ld a2, 200(sp)
-; CHECK-RV64-F-NEXT:    ld a1, 208(sp)
-; CHECK-RV64-F-NEXT:    ld a0, 216(sp)
-; CHECK-RV64-F-NEXT:    ld t2, 224(sp)
-; CHECK-RV64-F-NEXT:    ld t1, 232(sp)
-; CHECK-RV64-F-NEXT:    ld t0, 240(sp)
-; CHECK-RV64-F-NEXT:    ld ra, 248(sp)
+; CHECK-RV64-F-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t0, 240(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t1, 232(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t2, 224(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a0, 216(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a1, 208(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a2, 200(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a3, 192(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a4, 184(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a5, 176(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a6, 168(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a7, 160(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t3, 152(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t4, 144(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t5, 136(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t6, 128(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft0, 124(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft1, 120(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft2, 116(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft3, 112(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft4, 108(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft5, 104(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft6, 100(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft7, 96(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa0, 92(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa1, 88(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa2, 84(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa3, 80(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa4, 76(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa6, 68(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa7, 64(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft8, 60(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft9, 56(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft10, 52(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft11, 48(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs0, 44(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs1, 40(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs2, 36(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs3, 32(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs4, 28(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs5, 24(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs6, 20(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs7, 16(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs8, 12(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs9, 8(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs10, 4(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs11, 0(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    call otherfoo@plt
+; CHECK-RV64-F-NEXT:    flw fs11, 0(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs10, 4(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs9, 8(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs8, 12(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs7, 16(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs6, 20(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs5, 24(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs4, 28(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs3, 32(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs2, 36(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs1, 40(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs0, 44(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft11, 48(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft10, 52(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft9, 56(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft8, 60(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa7, 64(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa6, 68(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa4, 76(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa3, 80(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa2, 84(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa1, 88(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa0, 92(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft7, 96(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft6, 100(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft5, 104(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft4, 108(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft3, 112(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft2, 116(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft1, 120(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft0, 124(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t6, 128(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t5, 136(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t4, 144(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t3, 152(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a7, 160(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a6, 168(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a5, 176(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a4, 184(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a3, 192(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a2, 200(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a1, 208(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a0, 216(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t2, 224(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t1, 232(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t0, 240(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; CHECK-RV64-F-NEXT:    addi sp, sp, 256
 ; CHECK-RV64-F-NEXT:    mret
 ;
 ; CHECK-RV64-FD-LABEL: foo_with_call:
 ; CHECK-RV64-FD:       # %bb.0:
 ; CHECK-RV64-FD-NEXT:    addi sp, sp, -384
-; CHECK-RV64-FD-NEXT:    sd ra, 376(sp)
-; CHECK-RV64-FD-NEXT:    sd t0, 368(sp)
-; CHECK-RV64-FD-NEXT:    sd t1, 360(sp)
-; CHECK-RV64-FD-NEXT:    sd t2, 352(sp)
-; CHECK-RV64-FD-NEXT:    sd a0, 344(sp)
-; CHECK-RV64-FD-NEXT:    sd a1, 336(sp)
-; CHECK-RV64-FD-NEXT:    sd a2, 328(sp)
-; CHECK-RV64-FD-NEXT:    sd a3, 320(sp)
-; CHECK-RV64-FD-NEXT:    sd a4, 312(sp)
-; CHECK-RV64-FD-NEXT:    sd a5, 304(sp)
-; CHECK-RV64-FD-NEXT:    sd a6, 296(sp)
-; CHECK-RV64-FD-NEXT:    sd a7, 288(sp)
-; CHECK-RV64-FD-NEXT:    sd t3, 280(sp)
-; CHECK-RV64-FD-NEXT:    sd t4, 272(sp)
-; CHECK-RV64-FD-NEXT:    sd t5, 264(sp)
-; CHECK-RV64-FD-NEXT:    sd t6, 256(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft0, 248(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft1, 240(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft2, 232(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft3, 224(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft4, 216(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft5, 208(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft6, 200(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft7, 192(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa0, 184(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa1, 176(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa2, 168(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa3, 160(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa4, 152(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa5, 144(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa6, 136(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa7, 128(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft8, 120(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft9, 112(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft10, 104(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft11, 96(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs0, 88(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs1, 80(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs2, 72(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs3, 64(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs4, 56(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs5, 48(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs6, 40(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs7, 32(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs8, 24(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs9, 16(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs10, 8(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs11, 0(sp)
-; CHECK-RV64-FD-NEXT:    call otherfoo
-; CHECK-RV64-FD-NEXT:    fld fs11, 0(sp)
-; CHECK-RV64-FD-NEXT:    fld fs10, 8(sp)
-; CHECK-RV64-FD-NEXT:    fld fs9, 16(sp)
-; CHECK-RV64-FD-NEXT:    fld fs8, 24(sp)
-; CHECK-RV64-FD-NEXT:    fld fs7, 32(sp)
-; CHECK-RV64-FD-NEXT:    fld fs6, 40(sp)
-; CHECK-RV64-FD-NEXT:    fld fs5, 48(sp)
-; CHECK-RV64-FD-NEXT:    fld fs4, 56(sp)
-; CHECK-RV64-FD-NEXT:    fld fs3, 64(sp)
-; CHECK-RV64-FD-NEXT:    fld fs2, 72(sp)
-; CHECK-RV64-FD-NEXT:    fld fs1, 80(sp)
-; CHECK-RV64-FD-NEXT:    fld fs0, 88(sp)
-; CHECK-RV64-FD-NEXT:    fld ft11, 96(sp)
-; CHECK-RV64-FD-NEXT:    fld ft10, 104(sp)
-; CHECK-RV64-FD-NEXT:    fld ft9, 112(sp)
-; CHECK-RV64-FD-NEXT:    fld ft8, 120(sp)
-; CHECK-RV64-FD-NEXT:    fld fa7, 128(sp)
-; CHECK-RV64-FD-NEXT:    fld fa6, 136(sp)
-; CHECK-RV64-FD-NEXT:    fld fa5, 144(sp)
-; CHECK-RV64-FD-NEXT:    fld fa4, 152(sp)
-; CHECK-RV64-FD-NEXT:    fld fa3, 160(sp)
-; CHECK-RV64-FD-NEXT:    fld fa2, 168(sp)
-; CHECK-RV64-FD-NEXT:    fld fa1, 176(sp)
-; CHECK-RV64-FD-NEXT:    fld fa0, 184(sp)
-; CHECK-RV64-FD-NEXT:    fld ft7, 192(sp)
-; CHECK-RV64-FD-NEXT:    fld ft6, 200(sp)
-; CHECK-RV64-FD-NEXT:    fld ft5, 208(sp)
-; CHECK-RV64-FD-NEXT:    fld ft4, 216(sp)
-; CHECK-RV64-FD-NEXT:    fld ft3, 224(sp)
-; CHECK-RV64-FD-NEXT:    fld ft2, 232(sp)
-; CHECK-RV64-FD-NEXT:    fld ft1, 240(sp)
-; CHECK-RV64-FD-NEXT:    fld ft0, 248(sp)
-; CHECK-RV64-FD-NEXT:    ld t6, 256(sp)
-; CHECK-RV64-FD-NEXT:    ld t5, 264(sp)
-; CHECK-RV64-FD-NEXT:    ld t4, 272(sp)
-; CHECK-RV64-FD-NEXT:    ld t3, 280(sp)
-; CHECK-RV64-FD-NEXT:    ld a7, 288(sp)
-; CHECK-RV64-FD-NEXT:    ld a6, 296(sp)
-; CHECK-RV64-FD-NEXT:    ld a5, 304(sp)
-; CHECK-RV64-FD-NEXT:    ld a4, 312(sp)
-; CHECK-RV64-FD-NEXT:    ld a3, 320(sp)
-; CHECK-RV64-FD-NEXT:    ld a2, 328(sp)
-; CHECK-RV64-FD-NEXT:    ld a1, 336(sp)
-; CHECK-RV64-FD-NEXT:    ld a0, 344(sp)
-; CHECK-RV64-FD-NEXT:    ld t2, 352(sp)
-; CHECK-RV64-FD-NEXT:    ld t1, 360(sp)
-; CHECK-RV64-FD-NEXT:    ld t0, 368(sp)
-; CHECK-RV64-FD-NEXT:    ld ra, 376(sp)
+; CHECK-RV64-FD-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t0, 368(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t1, 360(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t2, 352(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a0, 344(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a1, 336(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a2, 328(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a3, 320(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a4, 312(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a5, 304(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a6, 296(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a7, 288(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t3, 280(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t4, 272(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t5, 264(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t6, 256(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft0, 248(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft1, 240(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft2, 232(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft3, 224(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft4, 216(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft5, 208(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft6, 200(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft7, 192(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa0, 184(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa1, 176(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa2, 168(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa3, 160(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa4, 152(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa5, 144(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa6, 136(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa7, 128(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft8, 120(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft9, 112(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft10, 104(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft11, 96(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs0, 88(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs1, 80(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs2, 72(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs3, 64(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs4, 56(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs5, 48(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs6, 40(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs7, 32(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs8, 24(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs9, 16(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    call otherfoo@plt
+; CHECK-RV64-FD-NEXT:    fld fs11, 0(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs10, 8(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs9, 16(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs8, 24(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs7, 32(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs6, 40(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs5, 48(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs4, 56(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs3, 64(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft11, 96(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft10, 104(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft9, 112(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft8, 120(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa7, 128(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa6, 136(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa5, 144(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa4, 152(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa3, 160(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa2, 168(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa1, 176(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa0, 184(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft7, 192(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft6, 200(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft5, 208(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft4, 216(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft3, 224(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft2, 232(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft1, 240(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft0, 248(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t6, 256(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t5, 264(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t4, 272(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t3, 280(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a7, 288(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a6, 296(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a5, 304(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a4, 312(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a3, 320(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a2, 328(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a1, 336(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a0, 344(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t2, 352(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t1, 360(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t0, 368(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; CHECK-RV64-FD-NEXT:    addi sp, sp, 384
 ; CHECK-RV64-FD-NEXT:    mret
   %call = call i32 bitcast (i32 (...)* @otherfoo to i32 ()*)()
@@ -552,508 +552,508 @@ define void @foo_fp_with_call() #3 {
 ; CHECK-RV32-LABEL: foo_fp_with_call:
 ; CHECK-RV32:       # %bb.0:
 ; CHECK-RV32-NEXT:    addi sp, sp, -80
-; CHECK-RV32-NEXT:    sw ra, 76(sp)
-; CHECK-RV32-NEXT:    sw t0, 72(sp)
-; CHECK-RV32-NEXT:    sw t1, 68(sp)
-; CHECK-RV32-NEXT:    sw t2, 64(sp)
-; CHECK-RV32-NEXT:    sw s0, 60(sp)
-; CHECK-RV32-NEXT:    sw a0, 56(sp)
-; CHECK-RV32-NEXT:    sw a1, 52(sp)
-; CHECK-RV32-NEXT:    sw a2, 48(sp)
-; CHECK-RV32-NEXT:    sw a3, 44(sp)
-; CHECK-RV32-NEXT:    sw a4, 40(sp)
-; CHECK-RV32-NEXT:    sw a5, 36(sp)
-; CHECK-RV32-NEXT:    sw a6, 32(sp)
-; CHECK-RV32-NEXT:    sw a7, 28(sp)
-; CHECK-RV32-NEXT:    sw t3, 24(sp)
-; CHECK-RV32-NEXT:    sw t4, 20(sp)
-; CHECK-RV32-NEXT:    sw t5, 16(sp)
-; CHECK-RV32-NEXT:    sw t6, 12(sp)
+; CHECK-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t0, 72(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t1, 68(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t2, 64(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s0, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a3, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a4, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a5, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a6, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw a7, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t3, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t4, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw t6, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32-NEXT:    addi s0, sp, 80
-; CHECK-RV32-NEXT:    call otherfoo
-; CHECK-RV32-NEXT:    lw t6, 12(sp)
-; CHECK-RV32-NEXT:    lw t5, 16(sp)
-; CHECK-RV32-NEXT:    lw t4, 20(sp)
-; CHECK-RV32-NEXT:    lw t3, 24(sp)
-; CHECK-RV32-NEXT:    lw a7, 28(sp)
-; CHECK-RV32-NEXT:    lw a6, 32(sp)
-; CHECK-RV32-NEXT:    lw a5, 36(sp)
-; CHECK-RV32-NEXT:    lw a4, 40(sp)
-; CHECK-RV32-NEXT:    lw a3, 44(sp)
-; CHECK-RV32-NEXT:    lw a2, 48(sp)
-; CHECK-RV32-NEXT:    lw a1, 52(sp)
-; CHECK-RV32-NEXT:    lw a0, 56(sp)
-; CHECK-RV32-NEXT:    lw s0, 60(sp)
-; CHECK-RV32-NEXT:    lw t2, 64(sp)
-; CHECK-RV32-NEXT:    lw t1, 68(sp)
-; CHECK-RV32-NEXT:    lw t0, 72(sp)
-; CHECK-RV32-NEXT:    lw ra, 76(sp)
+; CHECK-RV32-NEXT:    call otherfoo@plt
+; CHECK-RV32-NEXT:    lw t6, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t5, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t4, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t3, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a7, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a6, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a5, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a4, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a3, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw a0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw s0, 60(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t2, 64(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t1, 68(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw t0, 72(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; CHECK-RV32-NEXT:    addi sp, sp, 80
 ; CHECK-RV32-NEXT:    mret
 ;
 ; CHECK-RV32-F-LABEL: foo_fp_with_call:
 ; CHECK-RV32-F:       # %bb.0:
 ; CHECK-RV32-F-NEXT:    addi sp, sp, -208
-; CHECK-RV32-F-NEXT:    sw ra, 204(sp)
-; CHECK-RV32-F-NEXT:    sw t0, 200(sp)
-; CHECK-RV32-F-NEXT:    sw t1, 196(sp)
-; CHECK-RV32-F-NEXT:    sw t2, 192(sp)
-; CHECK-RV32-F-NEXT:    sw s0, 188(sp)
-; CHECK-RV32-F-NEXT:    sw a0, 184(sp)
-; CHECK-RV32-F-NEXT:    sw a1, 180(sp)
-; CHECK-RV32-F-NEXT:    sw a2, 176(sp)
-; CHECK-RV32-F-NEXT:    sw a3, 172(sp)
-; CHECK-RV32-F-NEXT:    sw a4, 168(sp)
-; CHECK-RV32-F-NEXT:    sw a5, 164(sp)
-; CHECK-RV32-F-NEXT:    sw a6, 160(sp)
-; CHECK-RV32-F-NEXT:    sw a7, 156(sp)
-; CHECK-RV32-F-NEXT:    sw t3, 152(sp)
-; CHECK-RV32-F-NEXT:    sw t4, 148(sp)
-; CHECK-RV32-F-NEXT:    sw t5, 144(sp)
-; CHECK-RV32-F-NEXT:    sw t6, 140(sp)
-; CHECK-RV32-F-NEXT:    fsw ft0, 136(sp)
-; CHECK-RV32-F-NEXT:    fsw ft1, 132(sp)
-; CHECK-RV32-F-NEXT:    fsw ft2, 128(sp)
-; CHECK-RV32-F-NEXT:    fsw ft3, 124(sp)
-; CHECK-RV32-F-NEXT:    fsw ft4, 120(sp)
-; CHECK-RV32-F-NEXT:    fsw ft5, 116(sp)
-; CHECK-RV32-F-NEXT:    fsw ft6, 112(sp)
-; CHECK-RV32-F-NEXT:    fsw ft7, 108(sp)
-; CHECK-RV32-F-NEXT:    fsw fa0, 104(sp)
-; CHECK-RV32-F-NEXT:    fsw fa1, 100(sp)
-; CHECK-RV32-F-NEXT:    fsw fa2, 96(sp)
-; CHECK-RV32-F-NEXT:    fsw fa3, 92(sp)
-; CHECK-RV32-F-NEXT:    fsw fa4, 88(sp)
-; CHECK-RV32-F-NEXT:    fsw fa5, 84(sp)
-; CHECK-RV32-F-NEXT:    fsw fa6, 80(sp)
-; CHECK-RV32-F-NEXT:    fsw fa7, 76(sp)
-; CHECK-RV32-F-NEXT:    fsw ft8, 72(sp)
-; CHECK-RV32-F-NEXT:    fsw ft9, 68(sp)
-; CHECK-RV32-F-NEXT:    fsw ft10, 64(sp)
-; CHECK-RV32-F-NEXT:    fsw ft11, 60(sp)
-; CHECK-RV32-F-NEXT:    fsw fs0, 56(sp)
-; CHECK-RV32-F-NEXT:    fsw fs1, 52(sp)
-; CHECK-RV32-F-NEXT:    fsw fs2, 48(sp)
-; CHECK-RV32-F-NEXT:    fsw fs3, 44(sp)
-; CHECK-RV32-F-NEXT:    fsw fs4, 40(sp)
-; CHECK-RV32-F-NEXT:    fsw fs5, 36(sp)
-; CHECK-RV32-F-NEXT:    fsw fs6, 32(sp)
-; CHECK-RV32-F-NEXT:    fsw fs7, 28(sp)
-; CHECK-RV32-F-NEXT:    fsw fs8, 24(sp)
-; CHECK-RV32-F-NEXT:    fsw fs9, 20(sp)
-; CHECK-RV32-F-NEXT:    fsw fs10, 16(sp)
-; CHECK-RV32-F-NEXT:    fsw fs11, 12(sp)
+; CHECK-RV32-F-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t0, 200(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t1, 196(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t2, 192(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw s0, 188(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a0, 184(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a1, 180(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a2, 176(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a3, 172(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a4, 168(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a5, 164(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a6, 160(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw a7, 156(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t3, 152(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t4, 148(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t5, 144(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    sw t6, 140(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft0, 136(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft1, 132(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft2, 128(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft3, 124(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft4, 120(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft5, 116(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft6, 112(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft7, 108(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa0, 104(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa1, 100(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa2, 96(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa3, 92(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa4, 88(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa5, 84(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa6, 80(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fa7, 76(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft8, 72(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft9, 68(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft10, 64(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw ft11, 60(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs0, 56(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs1, 52(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs2, 48(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs3, 44(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs4, 40(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs5, 36(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs6, 32(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs7, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs8, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs9, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs10, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-F-NEXT:    fsw fs11, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32-F-NEXT:    addi s0, sp, 208
-; CHECK-RV32-F-NEXT:    call otherfoo
-; CHECK-RV32-F-NEXT:    flw fs11, 12(sp)
-; CHECK-RV32-F-NEXT:    flw fs10, 16(sp)
-; CHECK-RV32-F-NEXT:    flw fs9, 20(sp)
-; CHECK-RV32-F-NEXT:    flw fs8, 24(sp)
-; CHECK-RV32-F-NEXT:    flw fs7, 28(sp)
-; CHECK-RV32-F-NEXT:    flw fs6, 32(sp)
-; CHECK-RV32-F-NEXT:    flw fs5, 36(sp)
-; CHECK-RV32-F-NEXT:    flw fs4, 40(sp)
-; CHECK-RV32-F-NEXT:    flw fs3, 44(sp)
-; CHECK-RV32-F-NEXT:    flw fs2, 48(sp)
-; CHECK-RV32-F-NEXT:    flw fs1, 52(sp)
-; CHECK-RV32-F-NEXT:    flw fs0, 56(sp)
-; CHECK-RV32-F-NEXT:    flw ft11, 60(sp)
-; CHECK-RV32-F-NEXT:    flw ft10, 64(sp)
-; CHECK-RV32-F-NEXT:    flw ft9, 68(sp)
-; CHECK-RV32-F-NEXT:    flw ft8, 72(sp)
-; CHECK-RV32-F-NEXT:    flw fa7, 76(sp)
-; CHECK-RV32-F-NEXT:    flw fa6, 80(sp)
-; CHECK-RV32-F-NEXT:    flw fa5, 84(sp)
-; CHECK-RV32-F-NEXT:    flw fa4, 88(sp)
-; CHECK-RV32-F-NEXT:    flw fa3, 92(sp)
-; CHECK-RV32-F-NEXT:    flw fa2, 96(sp)
-; CHECK-RV32-F-NEXT:    flw fa1, 100(sp)
-; CHECK-RV32-F-NEXT:    flw fa0, 104(sp)
-; CHECK-RV32-F-NEXT:    flw ft7, 108(sp)
-; CHECK-RV32-F-NEXT:    flw ft6, 112(sp)
-; CHECK-RV32-F-NEXT:    flw ft5, 116(sp)
-; CHECK-RV32-F-NEXT:    flw ft4, 120(sp)
-; CHECK-RV32-F-NEXT:    flw ft3, 124(sp)
-; CHECK-RV32-F-NEXT:    flw ft2, 128(sp)
-; CHECK-RV32-F-NEXT:    flw ft1, 132(sp)
-; CHECK-RV32-F-NEXT:    flw ft0, 136(sp)
-; CHECK-RV32-F-NEXT:    lw t6, 140(sp)
-; CHECK-RV32-F-NEXT:    lw t5, 144(sp)
-; CHECK-RV32-F-NEXT:    lw t4, 148(sp)
-; CHECK-RV32-F-NEXT:    lw t3, 152(sp)
-; CHECK-RV32-F-NEXT:    lw a7, 156(sp)
-; CHECK-RV32-F-NEXT:    lw a6, 160(sp)
-; CHECK-RV32-F-NEXT:    lw a5, 164(sp)
-; CHECK-RV32-F-NEXT:    lw a4, 168(sp)
-; CHECK-RV32-F-NEXT:    lw a3, 172(sp)
-; CHECK-RV32-F-NEXT:    lw a2, 176(sp)
-; CHECK-RV32-F-NEXT:    lw a1, 180(sp)
-; CHECK-RV32-F-NEXT:    lw a0, 184(sp)
-; CHECK-RV32-F-NEXT:    lw s0, 188(sp)
-; CHECK-RV32-F-NEXT:    lw t2, 192(sp)
-; CHECK-RV32-F-NEXT:    lw t1, 196(sp)
-; CHECK-RV32-F-NEXT:    lw t0, 200(sp)
-; CHECK-RV32-F-NEXT:    lw ra, 204(sp)
+; CHECK-RV32-F-NEXT:    call otherfoo@plt
+; CHECK-RV32-F-NEXT:    flw fs11, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs10, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs9, 20(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs8, 24(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs7, 28(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs6, 32(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs5, 36(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs4, 40(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs3, 44(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs2, 48(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs1, 52(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fs0, 56(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft11, 60(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft10, 64(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft9, 68(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft8, 72(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa7, 76(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa6, 80(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa5, 84(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa4, 88(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa3, 92(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa2, 96(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa1, 100(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw fa0, 104(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft7, 108(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft6, 112(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft5, 116(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft4, 120(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft3, 124(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft2, 128(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft1, 132(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    flw ft0, 136(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t6, 140(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t5, 144(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t4, 148(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t3, 152(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a7, 156(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a6, 160(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a5, 164(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a4, 168(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a3, 172(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a2, 176(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a1, 180(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw a0, 184(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw s0, 188(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t2, 192(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t1, 196(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw t0, 200(sp) # 4-byte Folded Reload
+; CHECK-RV32-F-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
 ; CHECK-RV32-F-NEXT:    addi sp, sp, 208
 ; CHECK-RV32-F-NEXT:    mret
 ;
 ; CHECK-RV32-FD-LABEL: foo_fp_with_call:
 ; CHECK-RV32-FD:       # %bb.0:
 ; CHECK-RV32-FD-NEXT:    addi sp, sp, -336
-; CHECK-RV32-FD-NEXT:    sw ra, 332(sp)
-; CHECK-RV32-FD-NEXT:    sw t0, 328(sp)
-; CHECK-RV32-FD-NEXT:    sw t1, 324(sp)
-; CHECK-RV32-FD-NEXT:    sw t2, 320(sp)
-; CHECK-RV32-FD-NEXT:    sw s0, 316(sp)
-; CHECK-RV32-FD-NEXT:    sw a0, 312(sp)
-; CHECK-RV32-FD-NEXT:    sw a1, 308(sp)
-; CHECK-RV32-FD-NEXT:    sw a2, 304(sp)
-; CHECK-RV32-FD-NEXT:    sw a3, 300(sp)
-; CHECK-RV32-FD-NEXT:    sw a4, 296(sp)
-; CHECK-RV32-FD-NEXT:    sw a5, 292(sp)
-; CHECK-RV32-FD-NEXT:    sw a6, 288(sp)
-; CHECK-RV32-FD-NEXT:    sw a7, 284(sp)
-; CHECK-RV32-FD-NEXT:    sw t3, 280(sp)
-; CHECK-RV32-FD-NEXT:    sw t4, 276(sp)
-; CHECK-RV32-FD-NEXT:    sw t5, 272(sp)
-; CHECK-RV32-FD-NEXT:    sw t6, 268(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft0, 256(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft1, 248(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft2, 240(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft3, 232(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft4, 224(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft5, 216(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft6, 208(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft7, 200(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa0, 192(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa1, 184(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa2, 176(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa3, 168(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa4, 160(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa5, 152(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa6, 144(sp)
-; CHECK-RV32-FD-NEXT:    fsd fa7, 136(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft8, 128(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft9, 120(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft10, 112(sp)
-; CHECK-RV32-FD-NEXT:    fsd ft11, 104(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs0, 96(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs1, 88(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs2, 80(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs3, 72(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs4, 64(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs5, 56(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs6, 48(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs7, 40(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs8, 32(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs9, 24(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs10, 16(sp)
-; CHECK-RV32-FD-NEXT:    fsd fs11, 8(sp)
+; CHECK-RV32-FD-NEXT:    sw ra, 332(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t0, 328(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t1, 324(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t2, 320(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw s0, 316(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a0, 312(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a1, 308(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a2, 304(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a3, 300(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a4, 296(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a5, 292(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a6, 288(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw a7, 284(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t3, 280(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t4, 276(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t5, 272(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    sw t6, 268(sp) # 4-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft0, 256(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft1, 248(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft2, 240(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft3, 232(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft4, 224(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft5, 216(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft6, 208(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft7, 200(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa0, 192(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa1, 184(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa2, 176(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa3, 168(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa4, 160(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa5, 152(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa6, 144(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fa7, 136(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft8, 128(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft9, 120(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft10, 112(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd ft11, 104(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs0, 96(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs1, 88(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs2, 80(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs3, 72(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs4, 64(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs5, 56(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs6, 48(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs7, 40(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs8, 32(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs9, 24(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs10, 16(sp) # 8-byte Folded Spill
+; CHECK-RV32-FD-NEXT:    fsd fs11, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV32-FD-NEXT:    addi s0, sp, 336
-; CHECK-RV32-FD-NEXT:    call otherfoo
-; CHECK-RV32-FD-NEXT:    fld fs11, 8(sp)
-; CHECK-RV32-FD-NEXT:    fld fs10, 16(sp)
-; CHECK-RV32-FD-NEXT:    fld fs9, 24(sp)
-; CHECK-RV32-FD-NEXT:    fld fs8, 32(sp)
-; CHECK-RV32-FD-NEXT:    fld fs7, 40(sp)
-; CHECK-RV32-FD-NEXT:    fld fs6, 48(sp)
-; CHECK-RV32-FD-NEXT:    fld fs5, 56(sp)
-; CHECK-RV32-FD-NEXT:    fld fs4, 64(sp)
-; CHECK-RV32-FD-NEXT:    fld fs3, 72(sp)
-; CHECK-RV32-FD-NEXT:    fld fs2, 80(sp)
-; CHECK-RV32-FD-NEXT:    fld fs1, 88(sp)
-; CHECK-RV32-FD-NEXT:    fld fs0, 96(sp)
-; CHECK-RV32-FD-NEXT:    fld ft11, 104(sp)
-; CHECK-RV32-FD-NEXT:    fld ft10, 112(sp)
-; CHECK-RV32-FD-NEXT:    fld ft9, 120(sp)
-; CHECK-RV32-FD-NEXT:    fld ft8, 128(sp)
-; CHECK-RV32-FD-NEXT:    fld fa7, 136(sp)
-; CHECK-RV32-FD-NEXT:    fld fa6, 144(sp)
-; CHECK-RV32-FD-NEXT:    fld fa5, 152(sp)
-; CHECK-RV32-FD-NEXT:    fld fa4, 160(sp)
-; CHECK-RV32-FD-NEXT:    fld fa3, 168(sp)
-; CHECK-RV32-FD-NEXT:    fld fa2, 176(sp)
-; CHECK-RV32-FD-NEXT:    fld fa1, 184(sp)
-; CHECK-RV32-FD-NEXT:    fld fa0, 192(sp)
-; CHECK-RV32-FD-NEXT:    fld ft7, 200(sp)
-; CHECK-RV32-FD-NEXT:    fld ft6, 208(sp)
-; CHECK-RV32-FD-NEXT:    fld ft5, 216(sp)
-; CHECK-RV32-FD-NEXT:    fld ft4, 224(sp)
-; CHECK-RV32-FD-NEXT:    fld ft3, 232(sp)
-; CHECK-RV32-FD-NEXT:    fld ft2, 240(sp)
-; CHECK-RV32-FD-NEXT:    fld ft1, 248(sp)
-; CHECK-RV32-FD-NEXT:    fld ft0, 256(sp)
-; CHECK-RV32-FD-NEXT:    lw t6, 268(sp)
-; CHECK-RV32-FD-NEXT:    lw t5, 272(sp)
-; CHECK-RV32-FD-NEXT:    lw t4, 276(sp)
-; CHECK-RV32-FD-NEXT:    lw t3, 280(sp)
-; CHECK-RV32-FD-NEXT:    lw a7, 284(sp)
-; CHECK-RV32-FD-NEXT:    lw a6, 288(sp)
-; CHECK-RV32-FD-NEXT:    lw a5, 292(sp)
-; CHECK-RV32-FD-NEXT:    lw a4, 296(sp)
-; CHECK-RV32-FD-NEXT:    lw a3, 300(sp)
-; CHECK-RV32-FD-NEXT:    lw a2, 304(sp)
-; CHECK-RV32-FD-NEXT:    lw a1, 308(sp)
-; CHECK-RV32-FD-NEXT:    lw a0, 312(sp)
-; CHECK-RV32-FD-NEXT:    lw s0, 316(sp)
-; CHECK-RV32-FD-NEXT:    lw t2, 320(sp)
-; CHECK-RV32-FD-NEXT:    lw t1, 324(sp)
-; CHECK-RV32-FD-NEXT:    lw t0, 328(sp)
-; CHECK-RV32-FD-NEXT:    lw ra, 332(sp)
+; CHECK-RV32-FD-NEXT:    call otherfoo@plt
+; CHECK-RV32-FD-NEXT:    fld fs11, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs10, 16(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs9, 24(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs8, 32(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs7, 40(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs6, 48(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs5, 56(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs4, 64(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs3, 72(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs2, 80(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs1, 88(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fs0, 96(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft11, 104(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft10, 112(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft9, 120(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft8, 128(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa7, 136(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa6, 144(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa5, 152(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa4, 160(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa3, 168(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa2, 176(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa1, 184(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld fa0, 192(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft7, 200(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft6, 208(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft5, 216(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft4, 224(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft3, 232(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft2, 240(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft1, 248(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    fld ft0, 256(sp) # 8-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t6, 268(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t5, 272(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t4, 276(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t3, 280(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a7, 284(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a6, 288(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a5, 292(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a4, 296(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a3, 300(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a2, 304(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a1, 308(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw a0, 312(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw s0, 316(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t2, 320(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t1, 324(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw t0, 328(sp) # 4-byte Folded Reload
+; CHECK-RV32-FD-NEXT:    lw ra, 332(sp) # 4-byte Folded Reload
 ; CHECK-RV32-FD-NEXT:    addi sp, sp, 336
 ; CHECK-RV32-FD-NEXT:    mret
 ;
 ; CHECK-RV64-LABEL: foo_fp_with_call:
 ; CHECK-RV64:       # %bb.0:
 ; CHECK-RV64-NEXT:    addi sp, sp, -144
-; CHECK-RV64-NEXT:    sd ra, 136(sp)
-; CHECK-RV64-NEXT:    sd t0, 128(sp)
-; CHECK-RV64-NEXT:    sd t1, 120(sp)
-; CHECK-RV64-NEXT:    sd t2, 112(sp)
-; CHECK-RV64-NEXT:    sd s0, 104(sp)
-; CHECK-RV64-NEXT:    sd a0, 96(sp)
-; CHECK-RV64-NEXT:    sd a1, 88(sp)
-; CHECK-RV64-NEXT:    sd a2, 80(sp)
-; CHECK-RV64-NEXT:    sd a3, 72(sp)
-; CHECK-RV64-NEXT:    sd a4, 64(sp)
-; CHECK-RV64-NEXT:    sd a5, 56(sp)
-; CHECK-RV64-NEXT:    sd a6, 48(sp)
-; CHECK-RV64-NEXT:    sd a7, 40(sp)
-; CHECK-RV64-NEXT:    sd t3, 32(sp)
-; CHECK-RV64-NEXT:    sd t4, 24(sp)
-; CHECK-RV64-NEXT:    sd t5, 16(sp)
-; CHECK-RV64-NEXT:    sd t6, 8(sp)
+; CHECK-RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t0, 128(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t1, 120(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t2, 112(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd s0, 104(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a0, 96(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a1, 88(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a2, 80(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a3, 72(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a4, 64(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a5, 56(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a6, 48(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd a7, 40(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t3, 32(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
+; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-NEXT:    addi s0, sp, 144
-; CHECK-RV64-NEXT:    call otherfoo
-; CHECK-RV64-NEXT:    ld t6, 8(sp)
-; CHECK-RV64-NEXT:    ld t5, 16(sp)
-; CHECK-RV64-NEXT:    ld t4, 24(sp)
-; CHECK-RV64-NEXT:    ld t3, 32(sp)
-; CHECK-RV64-NEXT:    ld a7, 40(sp)
-; CHECK-RV64-NEXT:    ld a6, 48(sp)
-; CHECK-RV64-NEXT:    ld a5, 56(sp)
-; CHECK-RV64-NEXT:    ld a4, 64(sp)
-; CHECK-RV64-NEXT:    ld a3, 72(sp)
-; CHECK-RV64-NEXT:    ld a2, 80(sp)
-; CHECK-RV64-NEXT:    ld a1, 88(sp)
-; CHECK-RV64-NEXT:    ld a0, 96(sp)
-; CHECK-RV64-NEXT:    ld s0, 104(sp)
-; CHECK-RV64-NEXT:    ld t2, 112(sp)
-; CHECK-RV64-NEXT:    ld t1, 120(sp)
-; CHECK-RV64-NEXT:    ld t0, 128(sp)
-; CHECK-RV64-NEXT:    ld ra, 136(sp)
+; CHECK-RV64-NEXT:    call otherfoo@plt
+; CHECK-RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t4, 24(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t3, 32(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a7, 40(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a6, 48(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a5, 56(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a4, 64(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a3, 72(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a2, 80(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a1, 88(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld a0, 96(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld s0, 104(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t2, 112(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t1, 120(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld t0, 128(sp) # 8-byte Folded Reload
+; CHECK-RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
 ; CHECK-RV64-NEXT:    addi sp, sp, 144
 ; CHECK-RV64-NEXT:    mret
 ;
 ; CHECK-RV64-F-LABEL: foo_fp_with_call:
 ; CHECK-RV64-F:       # %bb.0:
 ; CHECK-RV64-F-NEXT:    addi sp, sp, -272
-; CHECK-RV64-F-NEXT:    sd ra, 264(sp)
-; CHECK-RV64-F-NEXT:    sd t0, 256(sp)
-; CHECK-RV64-F-NEXT:    sd t1, 248(sp)
-; CHECK-RV64-F-NEXT:    sd t2, 240(sp)
-; CHECK-RV64-F-NEXT:    sd s0, 232(sp)
-; CHECK-RV64-F-NEXT:    sd a0, 224(sp)
-; CHECK-RV64-F-NEXT:    sd a1, 216(sp)
-; CHECK-RV64-F-NEXT:    sd a2, 208(sp)
-; CHECK-RV64-F-NEXT:    sd a3, 200(sp)
-; CHECK-RV64-F-NEXT:    sd a4, 192(sp)
-; CHECK-RV64-F-NEXT:    sd a5, 184(sp)
-; CHECK-RV64-F-NEXT:    sd a6, 176(sp)
-; CHECK-RV64-F-NEXT:    sd a7, 168(sp)
-; CHECK-RV64-F-NEXT:    sd t3, 160(sp)
-; CHECK-RV64-F-NEXT:    sd t4, 152(sp)
-; CHECK-RV64-F-NEXT:    sd t5, 144(sp)
-; CHECK-RV64-F-NEXT:    sd t6, 136(sp)
-; CHECK-RV64-F-NEXT:    fsw ft0, 132(sp)
-; CHECK-RV64-F-NEXT:    fsw ft1, 128(sp)
-; CHECK-RV64-F-NEXT:    fsw ft2, 124(sp)
-; CHECK-RV64-F-NEXT:    fsw ft3, 120(sp)
-; CHECK-RV64-F-NEXT:    fsw ft4, 116(sp)
-; CHECK-RV64-F-NEXT:    fsw ft5, 112(sp)
-; CHECK-RV64-F-NEXT:    fsw ft6, 108(sp)
-; CHECK-RV64-F-NEXT:    fsw ft7, 104(sp)
-; CHECK-RV64-F-NEXT:    fsw fa0, 100(sp)
-; CHECK-RV64-F-NEXT:    fsw fa1, 96(sp)
-; CHECK-RV64-F-NEXT:    fsw fa2, 92(sp)
-; CHECK-RV64-F-NEXT:    fsw fa3, 88(sp)
-; CHECK-RV64-F-NEXT:    fsw fa4, 84(sp)
-; CHECK-RV64-F-NEXT:    fsw fa5, 80(sp)
-; CHECK-RV64-F-NEXT:    fsw fa6, 76(sp)
-; CHECK-RV64-F-NEXT:    fsw fa7, 72(sp)
-; CHECK-RV64-F-NEXT:    fsw ft8, 68(sp)
-; CHECK-RV64-F-NEXT:    fsw ft9, 64(sp)
-; CHECK-RV64-F-NEXT:    fsw ft10, 60(sp)
-; CHECK-RV64-F-NEXT:    fsw ft11, 56(sp)
-; CHECK-RV64-F-NEXT:    fsw fs0, 52(sp)
-; CHECK-RV64-F-NEXT:    fsw fs1, 48(sp)
-; CHECK-RV64-F-NEXT:    fsw fs2, 44(sp)
-; CHECK-RV64-F-NEXT:    fsw fs3, 40(sp)
-; CHECK-RV64-F-NEXT:    fsw fs4, 36(sp)
-; CHECK-RV64-F-NEXT:    fsw fs5, 32(sp)
-; CHECK-RV64-F-NEXT:    fsw fs6, 28(sp)
-; CHECK-RV64-F-NEXT:    fsw fs7, 24(sp)
-; CHECK-RV64-F-NEXT:    fsw fs8, 20(sp)
-; CHECK-RV64-F-NEXT:    fsw fs9, 16(sp)
-; CHECK-RV64-F-NEXT:    fsw fs10, 12(sp)
-; CHECK-RV64-F-NEXT:    fsw fs11, 8(sp)
+; CHECK-RV64-F-NEXT:    sd ra, 264(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t0, 256(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t1, 248(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t2, 240(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd s0, 232(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a0, 224(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a1, 216(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a2, 208(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a3, 200(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a4, 192(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a5, 184(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a6, 176(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd a7, 168(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t3, 160(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t4, 152(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t5, 144(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    sd t6, 136(sp) # 8-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft0, 132(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft1, 128(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft2, 124(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft3, 120(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft4, 116(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft5, 112(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft6, 108(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft7, 104(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa0, 100(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa1, 96(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa2, 92(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa3, 88(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa4, 84(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa5, 80(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa6, 76(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fa7, 72(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft8, 68(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft9, 64(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft10, 60(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw ft11, 56(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs0, 52(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs1, 48(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs2, 44(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs3, 40(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs4, 36(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs5, 32(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs6, 28(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs7, 24(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs8, 20(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs9, 16(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs10, 12(sp) # 4-byte Folded Spill
+; CHECK-RV64-F-NEXT:    fsw fs11, 8(sp) # 4-byte Folded Spill
 ; CHECK-RV64-F-NEXT:    addi s0, sp, 272
-; CHECK-RV64-F-NEXT:    call otherfoo
-; CHECK-RV64-F-NEXT:    flw fs11, 8(sp)
-; CHECK-RV64-F-NEXT:    flw fs10, 12(sp)
-; CHECK-RV64-F-NEXT:    flw fs9, 16(sp)
-; CHECK-RV64-F-NEXT:    flw fs8, 20(sp)
-; CHECK-RV64-F-NEXT:    flw fs7, 24(sp)
-; CHECK-RV64-F-NEXT:    flw fs6, 28(sp)
-; CHECK-RV64-F-NEXT:    flw fs5, 32(sp)
-; CHECK-RV64-F-NEXT:    flw fs4, 36(sp)
-; CHECK-RV64-F-NEXT:    flw fs3, 40(sp)
-; CHECK-RV64-F-NEXT:    flw fs2, 44(sp)
-; CHECK-RV64-F-NEXT:    flw fs1, 48(sp)
-; CHECK-RV64-F-NEXT:    flw fs0, 52(sp)
-; CHECK-RV64-F-NEXT:    flw ft11, 56(sp)
-; CHECK-RV64-F-NEXT:    flw ft10, 60(sp)
-; CHECK-RV64-F-NEXT:    flw ft9, 64(sp)
-; CHECK-RV64-F-NEXT:    flw ft8, 68(sp)
-; CHECK-RV64-F-NEXT:    flw fa7, 72(sp)
-; CHECK-RV64-F-NEXT:    flw fa6, 76(sp)
-; CHECK-RV64-F-NEXT:    flw fa5, 80(sp)
-; CHECK-RV64-F-NEXT:    flw fa4, 84(sp)
-; CHECK-RV64-F-NEXT:    flw fa3, 88(sp)
-; CHECK-RV64-F-NEXT:    flw fa2, 92(sp)
-; CHECK-RV64-F-NEXT:    flw fa1, 96(sp)
-; CHECK-RV64-F-NEXT:    flw fa0, 100(sp)
-; CHECK-RV64-F-NEXT:    flw ft7, 104(sp)
-; CHECK-RV64-F-NEXT:    flw ft6, 108(sp)
-; CHECK-RV64-F-NEXT:    flw ft5, 112(sp)
-; CHECK-RV64-F-NEXT:    flw ft4, 116(sp)
-; CHECK-RV64-F-NEXT:    flw ft3, 120(sp)
-; CHECK-RV64-F-NEXT:    flw ft2, 124(sp)
-; CHECK-RV64-F-NEXT:    flw ft1, 128(sp)
-; CHECK-RV64-F-NEXT:    flw ft0, 132(sp)
-; CHECK-RV64-F-NEXT:    ld t6, 136(sp)
-; CHECK-RV64-F-NEXT:    ld t5, 144(sp)
-; CHECK-RV64-F-NEXT:    ld t4, 152(sp)
-; CHECK-RV64-F-NEXT:    ld t3, 160(sp)
-; CHECK-RV64-F-NEXT:    ld a7, 168(sp)
-; CHECK-RV64-F-NEXT:    ld a6, 176(sp)
-; CHECK-RV64-F-NEXT:    ld a5, 184(sp)
-; CHECK-RV64-F-NEXT:    ld a4, 192(sp)
-; CHECK-RV64-F-NEXT:    ld a3, 200(sp)
-; CHECK-RV64-F-NEXT:    ld a2, 208(sp)
-; CHECK-RV64-F-NEXT:    ld a1, 216(sp)
-; CHECK-RV64-F-NEXT:    ld a0, 224(sp)
-; CHECK-RV64-F-NEXT:    ld s0, 232(sp)
-; CHECK-RV64-F-NEXT:    ld t2, 240(sp)
-; CHECK-RV64-F-NEXT:    ld t1, 248(sp)
-; CHECK-RV64-F-NEXT:    ld t0, 256(sp)
-; CHECK-RV64-F-NEXT:    ld ra, 264(sp)
+; CHECK-RV64-F-NEXT:    call otherfoo@plt
+; CHECK-RV64-F-NEXT:    flw fs11, 8(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs10, 12(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs9, 16(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs8, 20(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs7, 24(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs6, 28(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs5, 32(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs4, 36(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs3, 40(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs2, 44(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs1, 48(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fs0, 52(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft11, 56(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft10, 60(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft9, 64(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft8, 68(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa7, 72(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa6, 76(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa5, 80(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa4, 84(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa3, 88(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa2, 92(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa1, 96(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw fa0, 100(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft7, 104(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft6, 108(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft5, 112(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft4, 116(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft3, 120(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft2, 124(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft1, 128(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    flw ft0, 132(sp) # 4-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t6, 136(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t5, 144(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t4, 152(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t3, 160(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a7, 168(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a6, 176(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a5, 184(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a4, 192(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a3, 200(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a2, 208(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a1, 216(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld a0, 224(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld s0, 232(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t2, 240(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t1, 248(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld t0, 256(sp) # 8-byte Folded Reload
+; CHECK-RV64-F-NEXT:    ld ra, 264(sp) # 8-byte Folded Reload
 ; CHECK-RV64-F-NEXT:    addi sp, sp, 272
 ; CHECK-RV64-F-NEXT:    mret
 ;
 ; CHECK-RV64-FD-LABEL: foo_fp_with_call:
 ; CHECK-RV64-FD:       # %bb.0:
 ; CHECK-RV64-FD-NEXT:    addi sp, sp, -400
-; CHECK-RV64-FD-NEXT:    sd ra, 392(sp)
-; CHECK-RV64-FD-NEXT:    sd t0, 384(sp)
-; CHECK-RV64-FD-NEXT:    sd t1, 376(sp)
-; CHECK-RV64-FD-NEXT:    sd t2, 368(sp)
-; CHECK-RV64-FD-NEXT:    sd s0, 360(sp)
-; CHECK-RV64-FD-NEXT:    sd a0, 352(sp)
-; CHECK-RV64-FD-NEXT:    sd a1, 344(sp)
-; CHECK-RV64-FD-NEXT:    sd a2, 336(sp)
-; CHECK-RV64-FD-NEXT:    sd a3, 328(sp)
-; CHECK-RV64-FD-NEXT:    sd a4, 320(sp)
-; CHECK-RV64-FD-NEXT:    sd a5, 312(sp)
-; CHECK-RV64-FD-NEXT:    sd a6, 304(sp)
-; CHECK-RV64-FD-NEXT:    sd a7, 296(sp)
-; CHECK-RV64-FD-NEXT:    sd t3, 288(sp)
-; CHECK-RV64-FD-NEXT:    sd t4, 280(sp)
-; CHECK-RV64-FD-NEXT:    sd t5, 272(sp)
-; CHECK-RV64-FD-NEXT:    sd t6, 264(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft0, 256(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft1, 248(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft2, 240(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft3, 232(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft4, 224(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft5, 216(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft6, 208(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft7, 200(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa0, 192(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa1, 184(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa2, 176(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa3, 168(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa4, 160(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa5, 152(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa6, 144(sp)
-; CHECK-RV64-FD-NEXT:    fsd fa7, 136(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft8, 128(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft9, 120(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft10, 112(sp)
-; CHECK-RV64-FD-NEXT:    fsd ft11, 104(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs0, 96(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs1, 88(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs2, 80(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs3, 72(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs4, 64(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs5, 56(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs6, 48(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs7, 40(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs8, 32(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs9, 24(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs10, 16(sp)
-; CHECK-RV64-FD-NEXT:    fsd fs11, 8(sp)
+; CHECK-RV64-FD-NEXT:    sd ra, 392(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t0, 384(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t1, 376(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t2, 368(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd s0, 360(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a0, 352(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a1, 344(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a2, 336(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a3, 328(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a4, 320(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a5, 312(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a6, 304(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd a7, 296(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t3, 288(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t4, 280(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t5, 272(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    sd t6, 264(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft0, 256(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft1, 248(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft2, 240(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft3, 232(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft4, 224(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft5, 216(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft6, 208(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft7, 200(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa0, 192(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa1, 184(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa2, 176(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa3, 168(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa4, 160(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa5, 152(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa6, 144(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fa7, 136(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft8, 128(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft9, 120(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft10, 112(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd ft11, 104(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs0, 96(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs1, 88(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs2, 80(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs3, 72(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs4, 64(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs5, 56(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs6, 48(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs7, 40(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs8, 32(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs9, 24(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs10, 16(sp) # 8-byte Folded Spill
+; CHECK-RV64-FD-NEXT:    fsd fs11, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV64-FD-NEXT:    addi s0, sp, 400
-; CHECK-RV64-FD-NEXT:    call otherfoo
-; CHECK-RV64-FD-NEXT:    fld fs11, 8(sp)
-; CHECK-RV64-FD-NEXT:    fld fs10, 16(sp)
-; CHECK-RV64-FD-NEXT:    fld fs9, 24(sp)
-; CHECK-RV64-FD-NEXT:    fld fs8, 32(sp)
-; CHECK-RV64-FD-NEXT:    fld fs7, 40(sp)
-; CHECK-RV64-FD-NEXT:    fld fs6, 48(sp)
-; CHECK-RV64-FD-NEXT:    fld fs5, 56(sp)
-; CHECK-RV64-FD-NEXT:    fld fs4, 64(sp)
-; CHECK-RV64-FD-NEXT:    fld fs3, 72(sp)
-; CHECK-RV64-FD-NEXT:    fld fs2, 80(sp)
-; CHECK-RV64-FD-NEXT:    fld fs1, 88(sp)
-; CHECK-RV64-FD-NEXT:    fld fs0, 96(sp)
-; CHECK-RV64-FD-NEXT:    fld ft11, 104(sp)
-; CHECK-RV64-FD-NEXT:    fld ft10, 112(sp)
-; CHECK-RV64-FD-NEXT:    fld ft9, 120(sp)
-; CHECK-RV64-FD-NEXT:    fld ft8, 128(sp)
-; CHECK-RV64-FD-NEXT:    fld fa7, 136(sp)
-; CHECK-RV64-FD-NEXT:    fld fa6, 144(sp)
-; CHECK-RV64-FD-NEXT:    fld fa5, 152(sp)
-; CHECK-RV64-FD-NEXT:    fld fa4, 160(sp)
-; CHECK-RV64-FD-NEXT:    fld fa3, 168(sp)
-; CHECK-RV64-FD-NEXT:    fld fa2, 176(sp)
-; CHECK-RV64-FD-NEXT:    fld fa1, 184(sp)
-; CHECK-RV64-FD-NEXT:    fld fa0, 192(sp)
-; CHECK-RV64-FD-NEXT:    fld ft7, 200(sp)
-; CHECK-RV64-FD-NEXT:    fld ft6, 208(sp)
-; CHECK-RV64-FD-NEXT:    fld ft5, 216(sp)
-; CHECK-RV64-FD-NEXT:    fld ft4, 224(sp)
-; CHECK-RV64-FD-NEXT:    fld ft3, 232(sp)
-; CHECK-RV64-FD-NEXT:    fld ft2, 240(sp)
-; CHECK-RV64-FD-NEXT:    fld ft1, 248(sp)
-; CHECK-RV64-FD-NEXT:    fld ft0, 256(sp)
-; CHECK-RV64-FD-NEXT:    ld t6, 264(sp)
-; CHECK-RV64-FD-NEXT:    ld t5, 272(sp)
-; CHECK-RV64-FD-NEXT:    ld t4, 280(sp)
-; CHECK-RV64-FD-NEXT:    ld t3, 288(sp)
-; CHECK-RV64-FD-NEXT:    ld a7, 296(sp)
-; CHECK-RV64-FD-NEXT:    ld a6, 304(sp)
-; CHECK-RV64-FD-NEXT:    ld a5, 312(sp)
-; CHECK-RV64-FD-NEXT:    ld a4, 320(sp)
-; CHECK-RV64-FD-NEXT:    ld a3, 328(sp)
-; CHECK-RV64-FD-NEXT:    ld a2, 336(sp)
-; CHECK-RV64-FD-NEXT:    ld a1, 344(sp)
-; CHECK-RV64-FD-NEXT:    ld a0, 352(sp)
-; CHECK-RV64-FD-NEXT:    ld s0, 360(sp)
-; CHECK-RV64-FD-NEXT:    ld t2, 368(sp)
-; CHECK-RV64-FD-NEXT:    ld t1, 376(sp)
-; CHECK-RV64-FD-NEXT:    ld t0, 384(sp)
-; CHECK-RV64-FD-NEXT:    ld ra, 392(sp)
+; CHECK-RV64-FD-NEXT:    call otherfoo@plt
+; CHECK-RV64-FD-NEXT:    fld fs11, 8(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs10, 16(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs9, 24(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs8, 32(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs7, 40(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs6, 48(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs5, 56(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs4, 64(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs3, 72(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs2, 80(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs1, 88(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fs0, 96(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft11, 104(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft10, 112(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft9, 120(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft8, 128(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa7, 136(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa6, 144(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa5, 152(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa4, 160(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa3, 168(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa2, 176(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa1, 184(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld fa0, 192(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft7, 200(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft6, 208(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft5, 216(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft4, 224(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft3, 232(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft2, 240(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft1, 248(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    fld ft0, 256(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t6, 264(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t5, 272(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t4, 280(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t3, 288(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a7, 296(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a6, 304(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a5, 312(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a4, 320(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a3, 328(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a2, 336(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a1, 344(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld a0, 352(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld s0, 360(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t2, 368(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t1, 376(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld t0, 384(sp) # 8-byte Folded Reload
+; CHECK-RV64-FD-NEXT:    ld ra, 392(sp) # 8-byte Folded Reload
 ; CHECK-RV64-FD-NEXT:    addi sp, sp, 400
 ; CHECK-RV64-FD-NEXT:    mret
   %call = call i32 bitcast (i32 (...)* @otherfoo to i32 ()*)()

diff --git a/llvm/test/CodeGen/RISCV/large-stack.ll b/llvm/test/CodeGen/RISCV/large-stack.ll
index 7cc6e83d7d85..e4cf5eb28399 100644
--- a/llvm/test/CodeGen/RISCV/large-stack.ll
+++ b/llvm/test/CodeGen/RISCV/large-stack.ll
@@ -22,8 +22,8 @@ define void @test() {
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -2032
 ; RV32I-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
-; RV32I-WITHFP-NEXT:    sw ra, 2028(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 2024(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    .cfi_offset ra, -4
 ; RV32I-WITHFP-NEXT:    .cfi_offset s0, -8
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 2032
@@ -34,8 +34,8 @@ define void @test() {
 ; RV32I-WITHFP-NEXT:    lui a0, 74565
 ; RV32I-WITHFP-NEXT:    addi a0, a0, -352
 ; RV32I-WITHFP-NEXT:    add sp, sp, a0
-; RV32I-WITHFP-NEXT:    lw s0, 2024(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 2028(sp)
+; RV32I-WITHFP-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 2032
 ; RV32I-WITHFP-NEXT:    ret
   %tmp = alloca [ 305419896 x i8 ] , align 4
@@ -50,8 +50,8 @@ define void @test_emergency_spill_slot(i32 %a) {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -2032
 ; RV32I-FPELIM-NEXT:    .cfi_def_cfa_offset 2032
-; RV32I-FPELIM-NEXT:    sw s0, 2028(sp)
-; RV32I-FPELIM-NEXT:    sw s1, 2024(sp)
+; RV32I-FPELIM-NEXT:    sw s0, 2028(sp) # 4-byte Folded Spill
+; RV32I-FPELIM-NEXT:    sw s1, 2024(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    .cfi_offset s0, -4
 ; RV32I-FPELIM-NEXT:    .cfi_offset s1, -8
 ; RV32I-FPELIM-NEXT:    lui a1, 97
@@ -74,8 +74,8 @@ define void @test_emergency_spill_slot(i32 %a) {
 ; RV32I-FPELIM-NEXT:    lui a0, 97
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 672
 ; RV32I-FPELIM-NEXT:    add sp, sp, a0
-; RV32I-FPELIM-NEXT:    lw s1, 2024(sp)
-; RV32I-FPELIM-NEXT:    lw s0, 2028(sp)
+; RV32I-FPELIM-NEXT:    lw s1, 2024(sp) # 4-byte Folded Reload
+; RV32I-FPELIM-NEXT:    lw s0, 2028(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 2032
 ; RV32I-FPELIM-NEXT:    ret
 ;
@@ -83,10 +83,10 @@ define void @test_emergency_spill_slot(i32 %a) {
 ; RV32I-WITHFP:       # %bb.0:
 ; RV32I-WITHFP-NEXT:    addi sp, sp, -2032
 ; RV32I-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
-; RV32I-WITHFP-NEXT:    sw ra, 2028(sp)
-; RV32I-WITHFP-NEXT:    sw s0, 2024(sp)
-; RV32I-WITHFP-NEXT:    sw s1, 2020(sp)
-; RV32I-WITHFP-NEXT:    sw s2, 2016(sp)
+; RV32I-WITHFP-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
+; RV32I-WITHFP-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    .cfi_offset ra, -4
 ; RV32I-WITHFP-NEXT:    .cfi_offset s0, -8
 ; RV32I-WITHFP-NEXT:    .cfi_offset s1, -12
@@ -115,10 +115,10 @@ define void @test_emergency_spill_slot(i32 %a) {
 ; RV32I-WITHFP-NEXT:    lui a0, 97
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 688
 ; RV32I-WITHFP-NEXT:    add sp, sp, a0
-; RV32I-WITHFP-NEXT:    lw s2, 2016(sp)
-; RV32I-WITHFP-NEXT:    lw s1, 2020(sp)
-; RV32I-WITHFP-NEXT:    lw s0, 2024(sp)
-; RV32I-WITHFP-NEXT:    lw ra, 2028(sp)
+; RV32I-WITHFP-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 2032
 ; RV32I-WITHFP-NEXT:    ret
   %data = alloca [ 100000 x i32 ] , align 4

diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 89c4bce122fd..958417e5e748 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -12,10 +12,10 @@ define signext i32 @square(i32 %a) nounwind {
 ; RV32I-LABEL: square:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    call __mulsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -27,11 +27,11 @@ define signext i32 @square(i32 %a) nounwind {
 ; RV64I-LABEL: square:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -47,9 +47,9 @@ define signext i32 @mul(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: mul:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __mulsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -61,10 +61,10 @@ define signext i32 @mul(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: mul:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -132,9 +132,9 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: mul64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __muldi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __muldi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -151,9 +151,9 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: mul64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -207,13 +207,13 @@ define i32 @mulhs(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: mulhs:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    srai a3, a2, 31
-; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -225,12 +225,12 @@ define i32 @mulhs(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: mulhs:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -253,13 +253,13 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV32I-LABEL: mulhu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -271,10 +271,10 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: mulhu:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index 505a351db0c4..fbff2dc965c3 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -12,9 +12,9 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: urem:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __umodsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -26,13 +26,13 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: urem:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    call __umoddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -48,9 +48,9 @@ define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: srem:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __modsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -62,11 +62,11 @@ define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: srem:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __moddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index b3be74cd76db..e80707c34029 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -24,19 +24,19 @@ define i32 @test() nounwind {
 ; RV32I-LABEL: test:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    addi sp, sp, -64
-; RV32I-NEXT:    sw ra, 60(sp)
-; RV32I-NEXT:    sw s0, 56(sp)
-; RV32I-NEXT:    sw s1, 52(sp)
-; RV32I-NEXT:    sw s2, 48(sp)
-; RV32I-NEXT:    sw s3, 44(sp)
-; RV32I-NEXT:    sw s4, 40(sp)
-; RV32I-NEXT:    sw s5, 36(sp)
-; RV32I-NEXT:    sw s6, 32(sp)
-; RV32I-NEXT:    sw s7, 28(sp)
-; RV32I-NEXT:    sw s8, 24(sp)
-; RV32I-NEXT:    sw s9, 20(sp)
-; RV32I-NEXT:    sw s10, 16(sp)
-; RV32I-NEXT:    sw s11, 12(sp)
+; RV32I-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui s6, %hi(a)
 ; RV32I-NEXT:    lw a0, %lo(a)(s6)
 ; RV32I-NEXT:    beqz a0, .LBB0_11
@@ -70,7 +70,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(d)(s1)
 ; RV32I-NEXT:    lw a4, %lo(e)(s0)
 ; RV32I-NEXT:    addi a5, zero, 32
-; RV32I-NEXT:    call foo
+; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:  .LBB0_5: # %if.end
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(k)(s3)
@@ -83,7 +83,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(e)(s0)
 ; RV32I-NEXT:    lw a4, %lo(f)(s7)
 ; RV32I-NEXT:    addi a5, zero, 64
-; RV32I-NEXT:    call foo
+; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:  .LBB0_7: # %if.end5
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(j)(s4)
@@ -96,7 +96,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(f)(s7)
 ; RV32I-NEXT:    lw a4, %lo(g)(s8)
 ; RV32I-NEXT:    addi a5, zero, 32
-; RV32I-NEXT:    call foo
+; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:  .LBB0_9: # %if.end9
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(i)(s5)
@@ -109,23 +109,23 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a3, %lo(g)(s8)
 ; RV32I-NEXT:    lw a4, %lo(h)(s9)
 ; RV32I-NEXT:    addi a5, zero, 32
-; RV32I-NEXT:    call foo
+; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:    j .LBB0_2
 ; RV32I-NEXT:  .LBB0_11: # %for.end
 ; RV32I-NEXT:    addi a0, zero, 1
-; RV32I-NEXT:    lw s11, 12(sp)
-; RV32I-NEXT:    lw s10, 16(sp)
-; RV32I-NEXT:    lw s9, 20(sp)
-; RV32I-NEXT:    lw s8, 24(sp)
-; RV32I-NEXT:    lw s7, 28(sp)
-; RV32I-NEXT:    lw s6, 32(sp)
-; RV32I-NEXT:    lw s5, 36(sp)
-; RV32I-NEXT:    lw s4, 40(sp)
-; RV32I-NEXT:    lw s3, 44(sp)
-; RV32I-NEXT:    lw s2, 48(sp)
-; RV32I-NEXT:    lw s1, 52(sp)
-; RV32I-NEXT:    lw s0, 56(sp)
-; RV32I-NEXT:    lw ra, 60(sp)
+; RV32I-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 64
 ; RV32I-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rv32Zbb.ll b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
index 86ef499b1807..b95fcd5f5232 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
@@ -318,7 +318,7 @@ define i32 @ctlz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: ctlz_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    beqz a0, .LBB8_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    srli a1, a0, 1
@@ -350,13 +350,13 @@ define i32 @ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    j .LBB8_3
 ; RV32I-NEXT:  .LBB8_2:
 ; RV32I-NEXT:    addi a0, zero, 32
 ; RV32I-NEXT:  .LBB8_3: # %cond.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -379,14 +379,14 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-LABEL: ctlz_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
-; RV32I-NEXT:    sw s6, 0(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    srli a0, a1, 1
@@ -419,7 +419,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s0, a1, 257
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    srli a0, s4, 1
 ; RV32I-NEXT:    or a0, s4, a0
@@ -443,7 +443,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    bnez s3, .LBB9_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -453,14 +453,14 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB9_3:
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    lw s6, 0(sp)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -499,7 +499,7 @@ define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: cttz_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    beqz a0, .LBB10_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi a1, a0, -1
@@ -523,13 +523,13 @@ define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    j .LBB10_3
 ; RV32I-NEXT:  .LBB10_2:
 ; RV32I-NEXT:    addi a0, zero, 32
 ; RV32I-NEXT:  .LBB10_3: # %cond.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -552,14 +552,14 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-LABEL: cttz_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
-; RV32I-NEXT:    sw s6, 0(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s3, a1
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a0, a0, -1
@@ -584,7 +584,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    addi a0, s3, -1
 ; RV32I-NEXT:    not a1, s3
@@ -600,7 +600,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    bnez s4, .LBB11_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
@@ -610,14 +610,14 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB11_3:
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    lw s6, 0(sp)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -656,7 +656,7 @@ define i32 @ctpop_i32(i32 %a) nounwind {
 ; RV32I-LABEL: ctpop_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    srli a1, a0, 1
 ; RV32I-NEXT:    lui a2, 349525
 ; RV32I-NEXT:    addi a2, a2, 1365
@@ -675,9 +675,9 @@ define i32 @ctpop_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -700,13 +700,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-LABEL: ctpop_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    srli a0, a1, 1
 ; RV32I-NEXT:    lui a2, 349525
@@ -727,7 +727,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 4112
 ; RV32I-NEXT:    addi s1, a1, 257
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli s5, a0, 24
 ; RV32I-NEXT:    srli a0, s2, 1
 ; RV32I-NEXT:    and a0, a0, s3
@@ -740,17 +740,17 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    and a0, a0, s4
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    add a0, a0, s5
 ; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
index 11e7568eba3f..f7b83deb207b 100644
--- a/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
+++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
@@ -15,28 +15,28 @@ define float @float_test(float %a, float %b) nounwind {
 ; RV32IF-LABEL: float_test:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    sw s0, 8(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv s0, a1
-; RV32IF-NEXT:    call __addsf3
+; RV32IF-NEXT:    call __addsf3@plt
 ; RV32IF-NEXT:    mv a1, s0
-; RV32IF-NEXT:    call __divsf3
-; RV32IF-NEXT:    lw s0, 8(sp)
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    call __divsf3 at plt
+; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: float_test:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    sd s0, 0(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    mv s0, a1
-; RV64IF-NEXT:    call __addsf3
+; RV64IF-NEXT:    call __addsf3@plt
 ; RV64IF-NEXT:    mv a1, s0
-; RV64IF-NEXT:    call __divsf3
-; RV64IF-NEXT:    ld s0, 0(sp)
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    call __divsf3 at plt
+; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = fadd float %a, %b
@@ -48,32 +48,32 @@ define double @double_test(double %a, double %b) nounwind {
 ; RV32IF-LABEL: double_test:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp)
-; RV32IF-NEXT:    sw s0, 8(sp)
-; RV32IF-NEXT:    sw s1, 4(sp)
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    mv s0, a3
 ; RV32IF-NEXT:    mv s1, a2
-; RV32IF-NEXT:    call __adddf3
+; RV32IF-NEXT:    call __adddf3@plt
 ; RV32IF-NEXT:    mv a2, s1
 ; RV32IF-NEXT:    mv a3, s0
-; RV32IF-NEXT:    call __divdf3
-; RV32IF-NEXT:    lw s1, 4(sp)
-; RV32IF-NEXT:    lw s0, 8(sp)
-; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    call __divdf3 at plt
+; RV32IF-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: double_test:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    sd s0, 0(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    mv s0, a1
-; RV64IF-NEXT:    call __adddf3
+; RV64IF-NEXT:    call __adddf3@plt
 ; RV64IF-NEXT:    mv a1, s0
-; RV64IF-NEXT:    call __divdf3
-; RV64IF-NEXT:    ld s0, 0(sp)
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    call __divdf3@plt
+; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
   %1 = fadd double %a, %b

diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
index ede169dce784..10cb5882b5f7 100644
--- a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
+++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
@@ -13,58 +13,58 @@ define half @half_test(half %a, half %b) nounwind {
 ; RV32I-LABEL: half_test:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
-; RV32I-NEXT:    sw s2, 0(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi s0, a1, -1
 ; RV32I-NEXT:    and a0, a0, s0
-; RV32I-NEXT:    call __gnu_h2f_ieee
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    and a0, s2, s0
-; RV32I-NEXT:    call __gnu_h2f_ieee
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __addsf3
+; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    call __divsf3
-; RV32I-NEXT:    call __gnu_f2h_ieee
-; RV32I-NEXT:    lw s2, 0(sp)
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __divsf3 at plt
+; RV32I-NEXT:    call __gnu_f2h_ieee at plt
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: half_test:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
-; RV64I-NEXT:    sd s2, 0(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw s0, a1, -1
 ; RV64I-NEXT:    and a0, a0, s0
-; RV64I-NEXT:    call __gnu_h2f_ieee
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    and a0, s2, s0
-; RV64I-NEXT:    call __gnu_h2f_ieee
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call __divsf3
-; RV64I-NEXT:    call __gnu_f2h_ieee
-; RV64I-NEXT:    ld s2, 0(sp)
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    call __divsf3 at plt
+; RV64I-NEXT:    call __gnu_f2h_ieee at plt
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %1 = fadd half %a, %b

diff --git a/llvm/test/CodeGen/RISCV/rv64-large-stack.ll b/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
index dbe19cc60e29..2821129a8559 100644
--- a/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
@@ -8,20 +8,20 @@ define void @foo() nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi sp, sp, -2032
-; CHECK-NEXT:    sd ra, 2024(sp)
+; CHECK-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    lui a0, 95
 ; CHECK-NEXT:    addiw a0, a0, 1505
 ; CHECK-NEXT:    slli a0, a0, 13
 ; CHECK-NEXT:    addi a0, a0, -2000
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    call baz
+; CHECK-NEXT:    call baz@plt
 ; CHECK-NEXT:    lui a0, 95
 ; CHECK-NEXT:    addiw a0, a0, 1505
 ; CHECK-NEXT:    slli a0, a0, 13
 ; CHECK-NEXT:    addi a0, a0, -2000
 ; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    ld ra, 2024(sp)
+; CHECK-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 2032
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbb.ll b/llvm/test/CodeGen/RISCV/rv64Zbb.ll
index c14ef6abf56e..92ed8e7ed5fa 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbb.ll
@@ -222,7 +222,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ctlz_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    beqz a0, .LBB9_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    srliw a1, a0, 1
@@ -280,14 +280,14 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    addi a1, a1, 257
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    addi a0, a0, -32
 ; RV64I-NEXT:    j .LBB9_3
 ; RV64I-NEXT:  .LBB9_2:
 ; RV64I-NEXT:    addi a0, zero, 32
 ; RV64I-NEXT:  .LBB9_3: # %cond.end
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -310,7 +310,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-LABEL: ctlz_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    beqz a0, .LBB10_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    srli a1, a0, 1
@@ -366,13 +366,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    addi a1, a1, 257
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    j .LBB10_3
 ; RV64I-NEXT:  .LBB10_2:
 ; RV64I-NEXT:    addi a0, zero, 64
 ; RV64I-NEXT:  .LBB10_3: # %cond.end
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -395,7 +395,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: cttz_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    beqz a0, .LBB11_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi a1, a0, -1
@@ -441,13 +441,13 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    addi a1, a1, 257
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    j .LBB11_3
 ; RV64I-NEXT:  .LBB11_2:
 ; RV64I-NEXT:    addi a0, zero, 32
 ; RV64I-NEXT:  .LBB11_3: # %cond.end
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -470,7 +470,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-LABEL: cttz_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    beqz a0, .LBB12_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi a1, a0, -1
@@ -516,13 +516,13 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    addi a1, a1, 257
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    j .LBB12_3
 ; RV64I-NEXT:  .LBB12_2:
 ; RV64I-NEXT:    addi a0, zero, 64
 ; RV64I-NEXT:  .LBB12_3: # %cond.end
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -545,7 +545,7 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ctpop_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a1, a0, 32
 ; RV64I-NEXT:    srli a1, a1, 32
 ; RV64I-NEXT:    srliw a0, a0, 1
@@ -582,9 +582,9 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    addi a1, a1, 257
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -607,7 +607,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV64I-LABEL: ctpop_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    srli a1, a0, 1
 ; RV64I-NEXT:    lui a2, 21845
 ; RV64I-NEXT:    addiw a2, a2, 1365
@@ -648,9 +648,9 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 257
 ; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    addi a1, a1, 257
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
index a545a13b9c39..1f24d8c097cf 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
@@ -9,25 +9,25 @@ define i64 @complex_float_add(i64 %a.coerce, i64 %b.coerce) nounwind {
 ; CHECK-LABEL: complex_float_add:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi sp, sp, -32
-; CHECK-NEXT:    sd ra, 24(sp)
-; CHECK-NEXT:    sd s0, 16(sp)
-; CHECK-NEXT:    sd s1, 8(sp)
-; CHECK-NEXT:    sd s2, 0(sp)
+; CHECK-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    srli s2, a0, 32
 ; CHECK-NEXT:    srli s1, a1, 32
-; CHECK-NEXT:    call __addsf3
+; CHECK-NEXT:    call __addsf3@plt
 ; CHECK-NEXT:    mv s0, a0
 ; CHECK-NEXT:    mv a0, s2
 ; CHECK-NEXT:    mv a1, s1
-; CHECK-NEXT:    call __addsf3
+; CHECK-NEXT:    call __addsf3@plt
 ; CHECK-NEXT:    slli a0, a0, 32
 ; CHECK-NEXT:    slli a1, s0, 32
 ; CHECK-NEXT:    srli a1, a1, 32
 ; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    ld s2, 0(sp)
-; CHECK-NEXT:    ld s1, 8(sp)
-; CHECK-NEXT:    ld s0, 16(sp)
-; CHECK-NEXT:    ld ra, 24(sp)
+; CHECK-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 32
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
index 5f8ca8074786..0298c21359f3 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
@@ -11,18 +11,18 @@ define i32 @fp64_to_ui32(double %a) nounwind {
 ; RV64I-LABEL: fp64_to_ui32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixunsdfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunsdfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64IF-LABEL: fp64_to_ui32:
 ; RV64IF:       # %bb.0: # %entry
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call __fixunsdfsi
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixunsdfsi@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 entry:
@@ -34,18 +34,18 @@ define i32 @fp64_to_si32(double %a) nounwind {
 ; RV64I-LABEL: fp64_to_si32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixdfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixdfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64IF-LABEL: fp64_to_si32:
 ; RV64IF:       # %bb.0: # %entry
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call __fixdfsi
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixdfsi@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 entry:
@@ -62,18 +62,18 @@ define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
 ; RV64I-LABEL: strict_fp64_to_ui32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixunsdfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunsdfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64IF-LABEL: strict_fp64_to_ui32:
 ; RV64IF:       # %bb.0: # %entry
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call __fixunsdfsi
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixunsdfsi@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 entry:
@@ -85,18 +85,18 @@ define i32 @struct_fp64_to_si32(double %a) nounwind strictfp {
 ; RV64I-LABEL: struct_fp64_to_si32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixdfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixdfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64IF-LABEL: struct_fp64_to_si32:
 ; RV64IF:       # %bb.0: # %entry
 ; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp)
-; RV64IF-NEXT:    call __fixdfsi
-; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixdfsi@plt
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
index 1008f0ccf371..0b450c127077 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
@@ -11,9 +11,9 @@ define float @fadd_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fadd_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __addsf3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fadd float %a, %b
@@ -24,9 +24,9 @@ define float @fsub_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fsub_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __subsf3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __subsf3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fsub float %a, %b
@@ -37,9 +37,9 @@ define float @fmul_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fmul_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __mulsf3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __mulsf3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fmul float %a, %b
@@ -50,9 +50,9 @@ define float @fdiv_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fdiv_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __divsf3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __divsf3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fdiv float %a, %b
@@ -63,10 +63,10 @@ define i32 @feq_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: feq_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __eqsf2
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __eqsf2@plt
 ; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fcmp oeq float %a, %b
@@ -78,11 +78,11 @@ define i32 @flt_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: flt_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __ltsf2
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltsf2@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    slti a0, a0, 0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fcmp olt float %a, %b
@@ -94,11 +94,11 @@ define i32 @fle_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fle_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __lesf2
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __lesf2@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    slti a0, a0, 1
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fcmp ole float %a, %b
@@ -110,11 +110,11 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind {
 ; RV64I-LABEL: fcmp_ogt:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __gtsf2
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sgtz a0, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fcmp ogt float %a, %b
@@ -126,12 +126,12 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV64I-LABEL: fcmp_oge:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __gesf2
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gesf2@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a1, zero, -1
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fcmp oge float %a, %b
@@ -143,10 +143,10 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
 ; RV64I-LABEL: fcmp_ord:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __unordsf2
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unordsf2@plt
 ; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fcmp ord float %a, %b
@@ -158,10 +158,10 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
 ; RV64I-LABEL: fcmp_une:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __nesf2
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __nesf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fcmp une float %a, %b
@@ -173,9 +173,9 @@ define i32 @fcvt_w_s(float %a) nounwind {
 ; RV64I-LABEL: fcvt_w_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixsfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixsfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fptosi float %a to i32
@@ -186,9 +186,9 @@ define i32 @fcvt_wu_s(float %a) nounwind {
 ; RV64I-LABEL: fcvt_wu_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixunssfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fptoui float %a to i32
@@ -199,10 +199,10 @@ define float @fcvt_s_w(i32 %a) nounwind {
 ; RV64I-LABEL: fcvt_s_w:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    call __floatsisf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __floatsisf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = sitofp i32 %a to float
@@ -213,11 +213,11 @@ define float @fcvt_s_wu(i32 %a) nounwind {
 ; RV64I-LABEL: fcvt_s_wu:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    call __floatunsisf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __floatunsisf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = uitofp i32 %a to float
@@ -228,9 +228,9 @@ define i64 @fcvt_l_s(float %a) nounwind {
 ; RV64I-LABEL: fcvt_l_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixsfdi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fptosi float %a to i64
@@ -241,9 +241,9 @@ define i64 @fcvt_lu_s(float %a) nounwind {
 ; RV64I-LABEL: fcvt_lu_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixunssfdi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = fptoui float %a to i64
@@ -254,9 +254,9 @@ define float @fcvt_s_l(i64 %a) nounwind {
 ; RV64I-LABEL: fcvt_s_l:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __floatdisf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __floatdisf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = sitofp i64 %a to float
@@ -267,9 +267,9 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 ; RV64I-LABEL: fcvt_s_lu:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __floatundisf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __floatundisf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = uitofp i64 %a to float
@@ -282,9 +282,9 @@ define float @fsqrt_s(float %a) nounwind {
 ; RV64I-LABEL: fsqrt_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call sqrtf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call sqrtf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.sqrt.f32(float %a)
@@ -312,9 +312,9 @@ define float @fmin_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fmin_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call fminf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call fminf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.minnum.f32(float %a, float %b)
@@ -327,9 +327,9 @@ define float @fmax_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fmax_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call fmaxf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call fmaxf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.maxnum.f32(float %a, float %b)
@@ -343,9 +343,9 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
 ; RV64I-LABEL: fmadd_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call fmaf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.fma.f32(float %a, float %b, float %c)
@@ -356,22 +356,22 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-LABEL: fmsub_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a2, a0, a1
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    mv a1, s0
-; RV64I-NEXT:    call fmaf
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %c_ = fadd float 0.0, %c ; avoid negation using xor
@@ -384,28 +384,28 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV64I-LABEL: fnmadd_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
-; RV64I-NEXT:    sd s2, 0(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s1, a2
 ; RV64I-NEXT:    xor a2, a0, a2
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    mv a1, s2
-; RV64I-NEXT:    call fmaf
-; RV64I-NEXT:    ld s2, 0(sp)
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %a_ = fadd float 0.0, %a
@@ -420,21 +420,21 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-LABEL: fnmsub_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    call __addsf3
+; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    call fmaf
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    call fmaf@plt
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %a_ = fadd float 0.0, %a
@@ -449,9 +449,9 @@ define float @fceil_s(float %a) nounwind {
 ; RV64I-LABEL: fceil_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call ceilf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call ceilf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.ceil.f32(float %a)
@@ -464,9 +464,9 @@ define float @fcos_s(float %a) nounwind {
 ; RV64I-LABEL: fcos_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call cosf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call cosf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.cos.f32(float %a)
@@ -479,9 +479,9 @@ define float @fsin_s(float %a) nounwind {
 ; RV64I-LABEL: fsin_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call sinf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call sinf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.sin.f32(float %a)
@@ -494,9 +494,9 @@ define float @fexp_s(float %a) nounwind {
 ; RV64I-LABEL: fexp_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call expf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call expf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.exp.f32(float %a)
@@ -509,9 +509,9 @@ define float @fexp2_s(float %a) nounwind {
 ; RV64I-LABEL: fexp2_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call exp2f
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call exp2f@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.exp2.f32(float %a)
@@ -524,9 +524,9 @@ define float @ffloor_s(float %a) nounwind {
 ; RV64I-LABEL: ffloor_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call floorf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call floorf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.floor.f32(float %a)
@@ -539,9 +539,9 @@ define float @fflog_s(float %a) nounwind {
 ; RV64I-LABEL: fflog_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call llvm.flog.f32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call llvm.flog.f32@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.flog.f32(float %a)
@@ -554,9 +554,9 @@ define float @fflog2_s(float %a) nounwind {
 ; RV64I-LABEL: fflog2_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call llvm.flog2.f32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call llvm.flog2.f32@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.flog2.f32(float %a)
@@ -569,9 +569,9 @@ define float @fflog10_s(float %a) nounwind {
 ; RV64I-LABEL: fflog10_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call llvm.flog10.f32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call llvm.flog10.f32@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.flog10.f32(float %a)
@@ -584,9 +584,9 @@ define float @fnearbyint_s(float %a) nounwind {
 ; RV64I-LABEL: fnearbyint_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call llvm.fnearbyint.f32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call llvm.fnearbyint.f32@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.fnearbyint.f32(float %a)
@@ -599,9 +599,9 @@ define float @fround_s(float %a) nounwind {
 ; RV64I-LABEL: fround_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call roundf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call roundf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.round.f32(float %a)
@@ -614,9 +614,9 @@ define float @fpround_s(float %a) nounwind {
 ; RV64I-LABEL: fpround_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call llvm.fpround.f32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call llvm.fpround.f32@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.fpround.f32(float %a)
@@ -629,9 +629,9 @@ define float @frint_s(float %a) nounwind {
 ; RV64I-LABEL: frint_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call rintf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call rintf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.rint.f32(float %a)
@@ -644,9 +644,9 @@ define float @frem_s(float %a) nounwind {
 ; RV64I-LABEL: frem_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call llvm.rem.f32
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call llvm.rem.f32@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.rem.f32(float %a)
@@ -659,9 +659,9 @@ define float @fpow_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fpow_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call powf
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call powf@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.pow.f32(float %a, float %b)
@@ -674,11 +674,11 @@ define float @fpowi_s(float %a, i32 %b) nounwind {
 ; RV64I-LABEL: fpowi_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    call __powisf2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __powisf2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.powi.f32(float %a, i32 %b)
@@ -689,9 +689,9 @@ define double @fp_ext(float %a) nounwind {
 ; RV64I-LABEL: fp_ext:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __extendsfdf2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __extendsfdf2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %conv = fpext float %a to double
@@ -702,9 +702,9 @@ define float @fp_trunc(double %a) nounwind {
 ; RV64I-LABEL: fp_trunc:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __truncdfsf2
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __truncdfsf2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %conv = fptrunc double %a to float
@@ -715,9 +715,9 @@ define i32 @fp32_to_ui32(float %a) nounwind {
 ; RV64I-LABEL: fp32_to_ui32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixunssfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 entry:
@@ -729,9 +729,9 @@ define i32 @fp32_to_si32(float %a) nounwind {
 ; RV64I-LABEL: fp32_to_si32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixsfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixsfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 entry:
@@ -748,9 +748,9 @@ define i32 @strict_fp32_to_ui32(float %a) nounwind strictfp {
 ; RV64I-LABEL: strict_fp32_to_ui32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixunssfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunssfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 entry:
@@ -762,9 +762,9 @@ define i32 @strict_fp32_to_si32(float %a) nounwind strictfp {
 ; RV64I-LABEL: strict_fp32_to_si32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    call __fixsfsi
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixsfsi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/select-and.ll b/llvm/test/CodeGen/RISCV/select-and.ll
index b5319dab1439..c7554d5f1d07 100644
--- a/llvm/test/CodeGen/RISCV/select-and.ll
+++ b/llvm/test/CodeGen/RISCV/select-and.ll
@@ -39,34 +39,34 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV32I-LABEL: if_of_and:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    addi a1, zero, 1
 ; RV32I-NEXT:    bne a0, a1, .LBB1_2
 ; RV32I-NEXT:  # %bb.1: # %if.then
-; RV32I-NEXT:    call both
+; RV32I-NEXT:    call both@plt
 ; RV32I-NEXT:    j .LBB1_3
 ; RV32I-NEXT:  .LBB1_2: # %if.else
-; RV32I-NEXT:    call neither
+; RV32I-NEXT:    call neither@plt
 ; RV32I-NEXT:  .LBB1_3: # %if.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: if_of_and:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    addi a1, zero, 1
 ; RV64I-NEXT:    bne a0, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %if.then
-; RV64I-NEXT:    call both
+; RV64I-NEXT:    call both@plt
 ; RV64I-NEXT:    j .LBB1_3
 ; RV64I-NEXT:  .LBB1_2: # %if.else
-; RV64I-NEXT:    call neither
+; RV64I-NEXT:    call neither@plt
 ; RV64I-NEXT:  .LBB1_3: # %if.end
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = and i1 %a, %b

diff --git a/llvm/test/CodeGen/RISCV/select-or.ll b/llvm/test/CodeGen/RISCV/select-or.ll
index 30b0d331d6f4..768eb7ec05a2 100644
--- a/llvm/test/CodeGen/RISCV/select-or.ll
+++ b/llvm/test/CodeGen/RISCV/select-or.ll
@@ -39,34 +39,34 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV32I-LABEL: if_of_or:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    addi a1, zero, 1
 ; RV32I-NEXT:    bne a0, a1, .LBB1_2
 ; RV32I-NEXT:  # %bb.1: # %if.then
-; RV32I-NEXT:    call either
+; RV32I-NEXT:    call either@plt
 ; RV32I-NEXT:    j .LBB1_3
 ; RV32I-NEXT:  .LBB1_2: # %if.else
-; RV32I-NEXT:    call neither
+; RV32I-NEXT:    call neither@plt
 ; RV32I-NEXT:  .LBB1_3: # %if.end
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: if_of_or:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    addi a1, zero, 1
 ; RV64I-NEXT:    bne a0, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %if.then
-; RV64I-NEXT:    call either
+; RV64I-NEXT:    call either@plt
 ; RV64I-NEXT:    j .LBB1_3
 ; RV64I-NEXT:  .LBB1_2: # %if.else
-; RV64I-NEXT:    call neither
+; RV64I-NEXT:    call neither@plt
 ; RV64I-NEXT:  .LBB1_3: # %if.end
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = or i1 %a, %b

diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index 0c9c17ac7a4a..3c02a6094152 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -20,11 +20,11 @@ declare void @foo()
 define void @f2() shadowcallstack {
 ; RV32-LABEL: f2:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    tail foo
+; RV32-NEXT:    tail foo@plt
 ;
 ; RV64-LABEL: f2:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    tail foo
+; RV64-NEXT:    tail foo@plt
   tail call void @foo()
   ret void
 }
@@ -38,10 +38,10 @@ define i32 @f3() shadowcallstack {
 ; RV32-NEXT:    addi s2, s2, 4
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp)
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    call bar
-; RV32-NEXT:    lw ra, 12(sp)
+; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    lw ra, -4(s2)
 ; RV32-NEXT:    addi s2, s2, -4
@@ -53,10 +53,10 @@ define i32 @f3() shadowcallstack {
 ; RV64-NEXT:    addi s2, s2, 8
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    sd ra, 8(sp)
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    call bar
-; RV64-NEXT:    ld ra, 8(sp)
+; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ld ra, -8(s2)
 ; RV64-NEXT:    addi s2, s2, -8
@@ -73,28 +73,28 @@ define i32 @f4() shadowcallstack {
 ; RV32-NEXT:    addi s2, s2, 4
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw ra, 12(sp)
-; RV32-NEXT:    sw s0, 8(sp)
-; RV32-NEXT:    sw s1, 4(sp)
-; RV32-NEXT:    sw s3, 0(sp)
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 0(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    .cfi_offset s1, -12
 ; RV32-NEXT:    .cfi_offset s3, -16
-; RV32-NEXT:    call bar
+; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    mv s3, a0
-; RV32-NEXT:    call bar
+; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    mv s1, a0
-; RV32-NEXT:    call bar
+; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    call bar
+; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    add a1, s3, s1
 ; RV32-NEXT:    add a0, s0, a0
 ; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    lw s3, 0(sp)
-; RV32-NEXT:    lw s1, 4(sp)
-; RV32-NEXT:    lw s0, 8(sp)
-; RV32-NEXT:    lw ra, 12(sp)
+; RV32-NEXT:    lw s3, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    lw ra, -4(s2)
 ; RV32-NEXT:    addi s2, s2, -4
@@ -106,28 +106,28 @@ define i32 @f4() shadowcallstack {
 ; RV64-NEXT:    addi s2, s2, 8
 ; RV64-NEXT:    addi sp, sp, -32
 ; RV64-NEXT:    .cfi_def_cfa_offset 32
-; RV64-NEXT:    sd ra, 24(sp)
-; RV64-NEXT:    sd s0, 16(sp)
-; RV64-NEXT:    sd s1, 8(sp)
-; RV64-NEXT:    sd s3, 0(sp)
+; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s3, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    .cfi_offset s1, -24
 ; RV64-NEXT:    .cfi_offset s3, -32
-; RV64-NEXT:    call bar
+; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    mv s3, a0
-; RV64-NEXT:    call bar
+; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    mv s1, a0
-; RV64-NEXT:    call bar
+; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    call bar
+; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    add a1, s3, s1
 ; RV64-NEXT:    add a0, s0, a0
 ; RV64-NEXT:    addw a0, a1, a0
-; RV64-NEXT:    ld s3, 0(sp)
-; RV64-NEXT:    ld s1, 8(sp)
-; RV64-NEXT:    ld s0, 16(sp)
-; RV64-NEXT:    ld ra, 24(sp)
+; RV64-NEXT:    ld s3, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ld ra, -8(s2)
 ; RV64-NEXT:    addi s2, s2, -8
@@ -148,9 +148,9 @@ define i32 @f5() shadowcallstack nounwind {
 ; RV32-NEXT:    sw ra, 0(s2)
 ; RV32-NEXT:    addi s2, s2, 4
 ; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp)
-; RV32-NEXT:    call bar
-; RV32-NEXT:    lw ra, 12(sp)
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    call bar@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    lw ra, -4(s2)
 ; RV32-NEXT:    addi s2, s2, -4
@@ -161,9 +161,9 @@ define i32 @f5() shadowcallstack nounwind {
 ; RV64-NEXT:    sd ra, 0(s2)
 ; RV64-NEXT:    addi s2, s2, 8
 ; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd ra, 8(sp)
-; RV64-NEXT:    call bar
-; RV64-NEXT:    ld ra, 8(sp)
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    call bar@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ld ra, -8(s2)
 ; RV64-NEXT:    addi s2, s2, -8

diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index 6e1575d9dc5e..934ea94fbbe6 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -38,9 +38,9 @@ define i64 @lshr64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV32I-LABEL: lshr64_minsize:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __lshrdi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __lshrdi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -83,9 +83,9 @@ define i64 @ashr64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV32I-LABEL: ashr64_minsize:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __ashrdi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ashrdi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -128,9 +128,9 @@ define i64 @shl64_minsize(i64 %a, i64 %b) minsize nounwind {
 ; RV32I-LABEL: shl64_minsize:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    call __ashldi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ashldi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -146,8 +146,8 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: lshr128:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a2, 0(a2)
 ; RV32I-NEXT:    lw a3, 0(a1)
 ; RV32I-NEXT:    lw a4, 4(a1)
@@ -160,7 +160,7 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    addi a0, sp, 24
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    sw a3, 8(sp)
-; RV32I-NEXT:    call __lshrti3
+; RV32I-NEXT:    call __lshrti3@plt
 ; RV32I-NEXT:    lw a0, 36(sp)
 ; RV32I-NEXT:    lw a1, 32(sp)
 ; RV32I-NEXT:    lw a2, 28(sp)
@@ -169,8 +169,8 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    sw a1, 8(s0)
 ; RV32I-NEXT:    sw a2, 4(s0)
 ; RV32I-NEXT:    sw a3, 0(s0)
-; RV32I-NEXT:    lw s0, 40(sp)
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
 ;
@@ -199,8 +199,8 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: ashr128:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a2, 0(a2)
 ; RV32I-NEXT:    lw a3, 0(a1)
 ; RV32I-NEXT:    lw a4, 4(a1)
@@ -213,7 +213,7 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    addi a0, sp, 24
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    sw a3, 8(sp)
-; RV32I-NEXT:    call __ashrti3
+; RV32I-NEXT:    call __ashrti3@plt
 ; RV32I-NEXT:    lw a0, 36(sp)
 ; RV32I-NEXT:    lw a1, 32(sp)
 ; RV32I-NEXT:    lw a2, 28(sp)
@@ -222,8 +222,8 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    sw a1, 8(s0)
 ; RV32I-NEXT:    sw a2, 4(s0)
 ; RV32I-NEXT:    sw a3, 0(s0)
-; RV32I-NEXT:    lw s0, 40(sp)
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
 ;
@@ -252,8 +252,8 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: shl128:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a2, 0(a2)
 ; RV32I-NEXT:    lw a3, 0(a1)
 ; RV32I-NEXT:    lw a4, 4(a1)
@@ -266,7 +266,7 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    addi a0, sp, 24
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    sw a3, 8(sp)
-; RV32I-NEXT:    call __ashlti3
+; RV32I-NEXT:    call __ashlti3@plt
 ; RV32I-NEXT:    lw a0, 36(sp)
 ; RV32I-NEXT:    lw a1, 32(sp)
 ; RV32I-NEXT:    lw a2, 28(sp)
@@ -275,8 +275,8 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    sw a1, 8(s0)
 ; RV32I-NEXT:    sw a2, 4(s0)
 ; RV32I-NEXT:    sw a3, 0(s0)
-; RV32I-NEXT:    lw s0, 40(sp)
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap.ll b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
index d8e9fdf0b7d0..9b3ef708da83 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
@@ -11,15 +11,15 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV32I-NOSW-LABEL: eliminate_restore:
 ; RV32I-NOSW:       # %bb.0:
 ; RV32I-NOSW-NEXT:    addi sp, sp, -16
-; RV32I-NOSW-NEXT:    sw ra, 12(sp)
+; RV32I-NOSW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NOSW-NEXT:    addi a1, zero, 32
 ; RV32I-NOSW-NEXT:    bgeu a1, a0, .LBB0_2
 ; RV32I-NOSW-NEXT:  # %bb.1: # %if.end
-; RV32I-NOSW-NEXT:    lw ra, 12(sp)
+; RV32I-NOSW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NOSW-NEXT:    addi sp, sp, 16
 ; RV32I-NOSW-NEXT:    ret
 ; RV32I-NOSW-NEXT:  .LBB0_2: # %if.then
-; RV32I-NOSW-NEXT:    call abort
+; RV32I-NOSW-NEXT:    call abort@plt
 ;
 ; RV32I-SW-LABEL: eliminate_restore:
 ; RV32I-SW:       # %bb.0:
@@ -29,8 +29,8 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV32I-SW-NEXT:    ret
 ; RV32I-SW-NEXT:  .LBB0_2: # %if.then
 ; RV32I-SW-NEXT:    addi sp, sp, -16
-; RV32I-SW-NEXT:    sw ra, 12(sp)
-; RV32I-SW-NEXT:    call abort
+; RV32I-SW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-SW-NEXT:    call abort@plt
 ;
 ; RV32I-SW-SR-LABEL: eliminate_restore:
 ; RV32I-SW-SR:       # %bb.0:
@@ -40,7 +40,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV32I-SW-SR-NEXT:    ret
 ; RV32I-SW-SR-NEXT:  .LBB0_2: # %if.then
 ; RV32I-SW-SR-NEXT:    call t0, __riscv_save_0
-; RV32I-SW-SR-NEXT:    call abort
+; RV32I-SW-SR-NEXT:    call abort@plt
   %cmp = icmp ule i32 %n, 32
   br i1 %cmp, label %if.then, label %if.end
 
@@ -58,8 +58,8 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-NOSW-LABEL: conditional_alloca:
 ; RV32I-NOSW:       # %bb.0:
 ; RV32I-NOSW-NEXT:    addi sp, sp, -16
-; RV32I-NOSW-NEXT:    sw ra, 12(sp)
-; RV32I-NOSW-NEXT:    sw s0, 8(sp)
+; RV32I-NOSW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NOSW-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NOSW-NEXT:    addi s0, sp, 16
 ; RV32I-NOSW-NEXT:    addi a1, zero, 32
 ; RV32I-NOSW-NEXT:    bltu a1, a0, .LBB1_2
@@ -68,11 +68,11 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-NOSW-NEXT:    andi a0, a0, -16
 ; RV32I-NOSW-NEXT:    sub a0, sp, a0
 ; RV32I-NOSW-NEXT:    mv sp, a0
-; RV32I-NOSW-NEXT:    call notdead
+; RV32I-NOSW-NEXT:    call notdead@plt
 ; RV32I-NOSW-NEXT:  .LBB1_2: # %if.end
 ; RV32I-NOSW-NEXT:    addi sp, s0, -16
-; RV32I-NOSW-NEXT:    lw s0, 8(sp)
-; RV32I-NOSW-NEXT:    lw ra, 12(sp)
+; RV32I-NOSW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NOSW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NOSW-NEXT:    addi sp, sp, 16
 ; RV32I-NOSW-NEXT:    ret
 ;
@@ -82,17 +82,17 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-SW-NEXT:    bltu a1, a0, .LBB1_2
 ; RV32I-SW-NEXT:  # %bb.1: # %if.then
 ; RV32I-SW-NEXT:    addi sp, sp, -16
-; RV32I-SW-NEXT:    sw ra, 12(sp)
-; RV32I-SW-NEXT:    sw s0, 8(sp)
+; RV32I-SW-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-SW-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-SW-NEXT:    addi s0, sp, 16
 ; RV32I-SW-NEXT:    addi a0, a0, 15
 ; RV32I-SW-NEXT:    andi a0, a0, -16
 ; RV32I-SW-NEXT:    sub a0, sp, a0
 ; RV32I-SW-NEXT:    mv sp, a0
-; RV32I-SW-NEXT:    call notdead
+; RV32I-SW-NEXT:    call notdead@plt
 ; RV32I-SW-NEXT:    addi sp, s0, -16
-; RV32I-SW-NEXT:    lw s0, 8(sp)
-; RV32I-SW-NEXT:    lw ra, 12(sp)
+; RV32I-SW-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-SW-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-SW-NEXT:    addi sp, sp, 16
 ; RV32I-SW-NEXT:  .LBB1_2: # %if.end
 ; RV32I-SW-NEXT:    ret
@@ -108,7 +108,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-SW-SR-NEXT:    andi a0, a0, -16
 ; RV32I-SW-SR-NEXT:    sub a0, sp, a0
 ; RV32I-SW-SR-NEXT:    mv sp, a0
-; RV32I-SW-SR-NEXT:    call notdead
+; RV32I-SW-SR-NEXT:    call notdead@plt
 ; RV32I-SW-SR-NEXT:    addi sp, s0, -16
 ; RV32I-SW-SR-NEXT:    tail __riscv_restore_1
 ; RV32I-SW-SR-NEXT:  .LBB1_2: # %if.end

diff --git a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
index 49f8e7010d66..f99e8d722679 100644
--- a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
+++ b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
@@ -7,13 +7,13 @@ define i32 @SplitSP() nounwind {
 ; RV32I-LABEL: SplitSP:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    addi sp, sp, -2032
-; RV32I-NEXT:    sw ra, 2028(sp)
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    addi a0, sp, 16
-; RV32I-NEXT:    call foo
+; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:    mv a0, zero
 ; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    lw ra, 2028(sp)
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    ret
 entry:
@@ -28,11 +28,11 @@ define i32 @NoSplitSP() nounwind {
 ; RV32I-LABEL: NoSplitSP:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    addi sp, sp, -2032
-; RV32I-NEXT:    sw ra, 2028(sp)
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a0, sp, 4
-; RV32I-NEXT:    call foo
+; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    lw ra, 2028(sp)
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index 477320f2ec79..0030e6d0b8c3 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -12,10 +12,10 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_positive_odd:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 95
-; RV32I-NEXT:    call __modsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -36,11 +36,11 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV64I-LABEL: fold_srem_positive_odd:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a1, zero, 95
-; RV64I-NEXT:    call __moddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -73,10 +73,10 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_positive_even:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 1060
-; RV32I-NEXT:    call __modsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -96,11 +96,11 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV64I-LABEL: fold_srem_positive_even:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a1, zero, 1060
-; RV64I-NEXT:    call __moddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -130,10 +130,10 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_negative_odd:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, -723
-; RV32I-NEXT:    call __modsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -153,11 +153,11 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV64I-LABEL: fold_srem_negative_odd:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a1, zero, -723
-; RV64I-NEXT:    call __moddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -190,11 +190,11 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_negative_even:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a1, 1048570
 ; RV32I-NEXT:    addi a1, a1, 1595
-; RV32I-NEXT:    call __modsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -215,12 +215,12 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
 ; RV64I-LABEL: fold_srem_negative_even:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    lui a1, 1048570
 ; RV64I-NEXT:    addiw a1, a1, 1595
-; RV64I-NEXT:    call __moddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -254,20 +254,20 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV32I-LABEL: combine_srem_sdiv:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __divsi3
+; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    add a0, s1, a0
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -289,21 +289,21 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV64I-LABEL: combine_srem_sdiv:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w s0, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    addw a0, s1, a0
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -451,32 +451,32 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV32I-LABEL: dont_fold_srem_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 98
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __moddi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: dont_fold_srem_i64:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    sw ra, 12(sp)
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    addi a2, zero, 98
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __moddi3
-; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: dont_fold_srem_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 98
-; RV64I-NEXT:    call __moddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index b7c40eb6eab2..9cf570e3fc03 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -12,13 +12,13 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: fold_srem_vec_1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lh s2, 12(a1)
 ; RV32I-NEXT:    lh s3, 8(a1)
 ; RV32I-NEXT:    lh s0, 4(a1)
@@ -26,30 +26,30 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, zero, -124
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    addi a1, zero, 98
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, -1003
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s1)
 ; RV32I-NEXT:    sh s0, 4(s1)
 ; RV32I-NEXT:    sh s5, 2(s1)
 ; RV32I-NEXT:    sh s4, 0(s1)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -106,13 +106,13 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: fold_srem_vec_1:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -64
-; RV64I-NEXT:    sd ra, 56(sp)
-; RV64I-NEXT:    sd s0, 48(sp)
-; RV64I-NEXT:    sd s1, 40(sp)
-; RV64I-NEXT:    sd s2, 32(sp)
-; RV64I-NEXT:    sd s3, 24(sp)
-; RV64I-NEXT:    sd s4, 16(sp)
-; RV64I-NEXT:    sd s5, 8(sp)
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lh s2, 24(a1)
 ; RV64I-NEXT:    lh s3, 16(a1)
 ; RV64I-NEXT:    lh s0, 8(a1)
@@ -120,30 +120,30 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, zero, -124
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    addi a1, zero, 98
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, -1003
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s1)
 ; RV64I-NEXT:    sh s0, 4(s1)
 ; RV64I-NEXT:    sh s5, 2(s1)
 ; RV64I-NEXT:    sh s4, 0(s1)
-; RV64I-NEXT:    ld s5, 8(sp)
-; RV64I-NEXT:    ld s4, 16(sp)
-; RV64I-NEXT:    ld s3, 24(sp)
-; RV64I-NEXT:    ld s2, 32(sp)
-; RV64I-NEXT:    ld s1, 40(sp)
-; RV64I-NEXT:    ld s0, 48(sp)
-; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
 ; RV64I-NEXT:    ret
 ;
@@ -228,13 +228,13 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: fold_srem_vec_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lh s2, 12(a1)
 ; RV32I-NEXT:    lh s3, 8(a1)
 ; RV32I-NEXT:    lh s0, 4(a1)
@@ -242,30 +242,30 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s1)
 ; RV32I-NEXT:    sh s0, 4(s1)
 ; RV32I-NEXT:    sh s5, 2(s1)
 ; RV32I-NEXT:    sh s4, 0(s1)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -315,13 +315,13 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: fold_srem_vec_2:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -64
-; RV64I-NEXT:    sd ra, 56(sp)
-; RV64I-NEXT:    sd s0, 48(sp)
-; RV64I-NEXT:    sd s1, 40(sp)
-; RV64I-NEXT:    sd s2, 32(sp)
-; RV64I-NEXT:    sd s3, 24(sp)
-; RV64I-NEXT:    sd s4, 16(sp)
-; RV64I-NEXT:    sd s5, 8(sp)
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lh s2, 24(a1)
 ; RV64I-NEXT:    lh s3, 16(a1)
 ; RV64I-NEXT:    lh s0, 8(a1)
@@ -329,30 +329,30 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s1)
 ; RV64I-NEXT:    sh s0, 4(s1)
 ; RV64I-NEXT:    sh s5, 2(s1)
 ; RV64I-NEXT:    sh s4, 0(s1)
-; RV64I-NEXT:    ld s5, 8(sp)
-; RV64I-NEXT:    ld s4, 16(sp)
-; RV64I-NEXT:    ld s3, 24(sp)
-; RV64I-NEXT:    ld s2, 32(sp)
-; RV64I-NEXT:    ld s1, 40(sp)
-; RV64I-NEXT:    ld s0, 48(sp)
-; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
 ; RV64I-NEXT:    ret
 ;
@@ -414,17 +414,17 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: combine_srem_sdiv:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
-; RV32I-NEXT:    sw s1, 36(sp)
-; RV32I-NEXT:    sw s2, 32(sp)
-; RV32I-NEXT:    sw s3, 28(sp)
-; RV32I-NEXT:    sw s4, 24(sp)
-; RV32I-NEXT:    sw s5, 20(sp)
-; RV32I-NEXT:    sw s6, 16(sp)
-; RV32I-NEXT:    sw s7, 12(sp)
-; RV32I-NEXT:    sw s8, 8(sp)
-; RV32I-NEXT:    sw s9, 4(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s9, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lh s2, 0(a1)
 ; RV32I-NEXT:    lh s3, 4(a1)
 ; RV32I-NEXT:    lh s4, 8(a1)
@@ -432,35 +432,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s8, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __divsi3
+; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    mv s9, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __divsi3
+; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __divsi3
+; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __divsi3
+; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    add a0, s8, a0
 ; RV32I-NEXT:    add a1, s7, s1
 ; RV32I-NEXT:    add a2, s6, s4
@@ -469,17 +469,17 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    sh a2, 4(s0)
 ; RV32I-NEXT:    sh a1, 2(s0)
 ; RV32I-NEXT:    sh a0, 0(s0)
-; RV32I-NEXT:    lw s9, 4(sp)
-; RV32I-NEXT:    lw s8, 8(sp)
-; RV32I-NEXT:    lw s7, 12(sp)
-; RV32I-NEXT:    lw s6, 16(sp)
-; RV32I-NEXT:    lw s5, 20(sp)
-; RV32I-NEXT:    lw s4, 24(sp)
-; RV32I-NEXT:    lw s3, 28(sp)
-; RV32I-NEXT:    lw s2, 32(sp)
-; RV32I-NEXT:    lw s1, 36(sp)
-; RV32I-NEXT:    lw s0, 40(sp)
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw s9, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
 ;
@@ -533,17 +533,17 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: combine_srem_sdiv:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -96
-; RV64I-NEXT:    sd ra, 88(sp)
-; RV64I-NEXT:    sd s0, 80(sp)
-; RV64I-NEXT:    sd s1, 72(sp)
-; RV64I-NEXT:    sd s2, 64(sp)
-; RV64I-NEXT:    sd s3, 56(sp)
-; RV64I-NEXT:    sd s4, 48(sp)
-; RV64I-NEXT:    sd s5, 40(sp)
-; RV64I-NEXT:    sd s6, 32(sp)
-; RV64I-NEXT:    sd s7, 24(sp)
-; RV64I-NEXT:    sd s8, 16(sp)
-; RV64I-NEXT:    sd s9, 8(sp)
+; RV64I-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s6, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s7, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s8, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s9, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lh s2, 0(a1)
 ; RV64I-NEXT:    lh s3, 8(a1)
 ; RV64I-NEXT:    lh s4, 16(a1)
@@ -551,35 +551,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s6, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s7, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s8, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    mv s9, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __divdi3
+; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    add a0, s8, a0
 ; RV64I-NEXT:    add a1, s7, s1
 ; RV64I-NEXT:    add a2, s6, s4
@@ -588,17 +588,17 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    sh a2, 4(s0)
 ; RV64I-NEXT:    sh a1, 2(s0)
 ; RV64I-NEXT:    sh a0, 0(s0)
-; RV64I-NEXT:    ld s9, 8(sp)
-; RV64I-NEXT:    ld s8, 16(sp)
-; RV64I-NEXT:    ld s7, 24(sp)
-; RV64I-NEXT:    ld s6, 32(sp)
-; RV64I-NEXT:    ld s5, 40(sp)
-; RV64I-NEXT:    ld s4, 48(sp)
-; RV64I-NEXT:    ld s3, 56(sp)
-; RV64I-NEXT:    ld s2, 64(sp)
-; RV64I-NEXT:    ld s1, 72(sp)
-; RV64I-NEXT:    ld s0, 80(sp)
-; RV64I-NEXT:    ld ra, 88(sp)
+; RV64I-NEXT:    ld s9, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s8, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s7, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s6, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 96
 ; RV64I-NEXT:    ret
 ;
@@ -665,11 +665,11 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: dont_fold_srem_power_of_two:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lh a2, 0(a1)
 ; RV32I-NEXT:    lh a0, 12(a1)
@@ -692,16 +692,16 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    sub s1, a3, a1
 ; RV32I-NEXT:    addi a1, zero, 95
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s0)
 ; RV32I-NEXT:    sh s1, 4(s0)
 ; RV32I-NEXT:    sh s3, 2(s0)
 ; RV32I-NEXT:    sh s2, 0(s0)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -746,11 +746,11 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: dont_fold_srem_power_of_two:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lh a2, 0(a1)
 ; RV64I-NEXT:    lh a0, 24(a1)
@@ -773,16 +773,16 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    sub s1, a3, a1
 ; RV64I-NEXT:    addi a1, zero, 95
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s0)
 ; RV64I-NEXT:    sh s1, 4(s0)
 ; RV64I-NEXT:    sh s3, 2(s0)
 ; RV64I-NEXT:    sh s2, 0(s0)
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -838,36 +838,36 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: dont_fold_srem_one:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lh s2, 12(a1)
 ; RV32I-NEXT:    lh s1, 8(a1)
 ; RV32I-NEXT:    lh a2, 4(a1)
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 654
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    addi a1, zero, 23
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a1, a0, 1327
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    sh zero, 0(s0)
 ; RV32I-NEXT:    sh a0, 6(s0)
 ; RV32I-NEXT:    sh s1, 4(s0)
 ; RV32I-NEXT:    sh s3, 2(s0)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -915,36 +915,36 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: dont_fold_srem_one:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lh s2, 24(a1)
 ; RV64I-NEXT:    lh s1, 16(a1)
 ; RV64I-NEXT:    lh a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    addi a1, zero, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sh zero, 0(s0)
 ; RV64I-NEXT:    sh a0, 6(s0)
 ; RV64I-NEXT:    sh s1, 4(s0)
 ; RV64I-NEXT:    sh s3, 2(s0)
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -1014,11 +1014,11 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: dont_fold_urem_i16_smax:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lh a2, 4(a1)
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lh s2, 12(a1)
@@ -1029,21 +1029,21 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    and a1, a1, a3
 ; RV32I-NEXT:    sub s3, a2, a1
 ; RV32I-NEXT:    addi a1, zero, 23
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a1, a0, 1327
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __modsi3
+; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    sh zero, 0(s0)
 ; RV32I-NEXT:    sh a0, 6(s0)
 ; RV32I-NEXT:    sh s1, 4(s0)
 ; RV32I-NEXT:    sh s3, 2(s0)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -1086,11 +1086,11 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: dont_fold_urem_i16_smax:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lh a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lh s2, 24(a1)
@@ -1101,21 +1101,21 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    sub s3, a2, a1
 ; RV64I-NEXT:    addi a1, zero, 23
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sh zero, 0(s0)
 ; RV64I-NEXT:    sh a0, 6(s0)
 ; RV64I-NEXT:    sh s1, 4(s0)
 ; RV64I-NEXT:    sh s3, 2(s0)
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -1175,17 +1175,17 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-LABEL: dont_fold_srem_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
-; RV32I-NEXT:    sw s1, 36(sp)
-; RV32I-NEXT:    sw s2, 32(sp)
-; RV32I-NEXT:    sw s3, 28(sp)
-; RV32I-NEXT:    sw s4, 24(sp)
-; RV32I-NEXT:    sw s5, 20(sp)
-; RV32I-NEXT:    sw s6, 16(sp)
-; RV32I-NEXT:    sw s7, 12(sp)
-; RV32I-NEXT:    sw s8, 8(sp)
-; RV32I-NEXT:    sw s9, 4(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s9, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw s2, 24(a1)
 ; RV32I-NEXT:    lw s3, 28(a1)
 ; RV32I-NEXT:    lw s4, 16(a1)
@@ -1198,21 +1198,21 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    addi a2, zero, 1
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __moddi3
+; RV32I-NEXT:    call __moddi3@plt
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    mv s8, a1
 ; RV32I-NEXT:    addi a2, zero, 654
 ; RV32I-NEXT:    mv a0, s6
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __moddi3
+; RV32I-NEXT:    call __moddi3@plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv s9, a1
 ; RV32I-NEXT:    addi a2, zero, 23
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __moddi3
+; RV32I-NEXT:    call __moddi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    lui a0, 1
@@ -1220,7 +1220,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __moddi3
+; RV32I-NEXT:    call __moddi3@plt
 ; RV32I-NEXT:    sw a1, 28(s0)
 ; RV32I-NEXT:    sw a0, 24(s0)
 ; RV32I-NEXT:    sw s1, 20(s0)
@@ -1229,34 +1229,34 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    sw s6, 8(s0)
 ; RV32I-NEXT:    sw s8, 4(s0)
 ; RV32I-NEXT:    sw s7, 0(s0)
-; RV32I-NEXT:    lw s9, 4(sp)
-; RV32I-NEXT:    lw s8, 8(sp)
-; RV32I-NEXT:    lw s7, 12(sp)
-; RV32I-NEXT:    lw s6, 16(sp)
-; RV32I-NEXT:    lw s5, 20(sp)
-; RV32I-NEXT:    lw s4, 24(sp)
-; RV32I-NEXT:    lw s3, 28(sp)
-; RV32I-NEXT:    lw s2, 32(sp)
-; RV32I-NEXT:    lw s1, 36(sp)
-; RV32I-NEXT:    lw s0, 40(sp)
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw s9, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: dont_fold_srem_i64:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -48
-; RV32IM-NEXT:    sw ra, 44(sp)
-; RV32IM-NEXT:    sw s0, 40(sp)
-; RV32IM-NEXT:    sw s1, 36(sp)
-; RV32IM-NEXT:    sw s2, 32(sp)
-; RV32IM-NEXT:    sw s3, 28(sp)
-; RV32IM-NEXT:    sw s4, 24(sp)
-; RV32IM-NEXT:    sw s5, 20(sp)
-; RV32IM-NEXT:    sw s6, 16(sp)
-; RV32IM-NEXT:    sw s7, 12(sp)
-; RV32IM-NEXT:    sw s8, 8(sp)
-; RV32IM-NEXT:    sw s9, 4(sp)
+; RV32IM-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s2, 32(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s3, 28(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s4, 24(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s5, 20(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s6, 16(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s9, 4(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    lw s2, 24(a1)
 ; RV32IM-NEXT:    lw s3, 28(a1)
 ; RV32IM-NEXT:    lw s4, 16(a1)
@@ -1269,21 +1269,21 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    addi a2, zero, 1
 ; RV32IM-NEXT:    mv a0, a3
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __moddi3
+; RV32IM-NEXT:    call __moddi3@plt
 ; RV32IM-NEXT:    mv s7, a0
 ; RV32IM-NEXT:    mv s8, a1
 ; RV32IM-NEXT:    addi a2, zero, 654
 ; RV32IM-NEXT:    mv a0, s6
 ; RV32IM-NEXT:    mv a1, s1
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __moddi3
+; RV32IM-NEXT:    call __moddi3@plt
 ; RV32IM-NEXT:    mv s6, a0
 ; RV32IM-NEXT:    mv s9, a1
 ; RV32IM-NEXT:    addi a2, zero, 23
 ; RV32IM-NEXT:    mv a0, s4
 ; RV32IM-NEXT:    mv a1, s5
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __moddi3
+; RV32IM-NEXT:    call __moddi3@plt
 ; RV32IM-NEXT:    mv s4, a0
 ; RV32IM-NEXT:    mv s1, a1
 ; RV32IM-NEXT:    lui a0, 1
@@ -1291,7 +1291,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    mv a0, s2
 ; RV32IM-NEXT:    mv a1, s3
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __moddi3
+; RV32IM-NEXT:    call __moddi3@plt
 ; RV32IM-NEXT:    sw a1, 28(s0)
 ; RV32IM-NEXT:    sw a0, 24(s0)
 ; RV32IM-NEXT:    sw s1, 20(s0)
@@ -1300,53 +1300,53 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    sw s6, 8(s0)
 ; RV32IM-NEXT:    sw s8, 4(s0)
 ; RV32IM-NEXT:    sw s7, 0(s0)
-; RV32IM-NEXT:    lw s9, 4(sp)
-; RV32IM-NEXT:    lw s8, 8(sp)
-; RV32IM-NEXT:    lw s7, 12(sp)
-; RV32IM-NEXT:    lw s6, 16(sp)
-; RV32IM-NEXT:    lw s5, 20(sp)
-; RV32IM-NEXT:    lw s4, 24(sp)
-; RV32IM-NEXT:    lw s3, 28(sp)
-; RV32IM-NEXT:    lw s2, 32(sp)
-; RV32IM-NEXT:    lw s1, 36(sp)
-; RV32IM-NEXT:    lw s0, 40(sp)
-; RV32IM-NEXT:    lw ra, 44(sp)
+; RV32IM-NEXT:    lw s9, 4(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s5, 20(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s4, 24(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s3, 28(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s2, 32(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 48
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: dont_fold_srem_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    ld s2, 24(a1)
 ; RV64I-NEXT:    ld s1, 16(a1)
 ; RV64I-NEXT:    ld a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    addi a1, zero, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __moddi3
+; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sd zero, 0(s0)
 ; RV64I-NEXT:    sd a0, 24(s0)
 ; RV64I-NEXT:    sd s1, 16(s0)
 ; RV64I-NEXT:    sd s3, 8(s0)
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
index 293eab03b74b..edfa4b4200d3 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
@@ -11,9 +11,9 @@ define void @caller(i32 %n) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -128
 ; RV32I-NEXT:    .cfi_def_cfa_offset 128
-; RV32I-NEXT:    sw ra, 124(sp)
-; RV32I-NEXT:    sw s0, 120(sp)
-; RV32I-NEXT:    sw s1, 116(sp)
+; RV32I-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 116(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    .cfi_offset s0, -8
 ; RV32I-NEXT:    .cfi_offset s1, -12
@@ -26,11 +26,11 @@ define void @caller(i32 %n) {
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
 ; RV32I-NEXT:    addi a1, s1, 64
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    addi sp, s0, -128
-; RV32I-NEXT:    lw s1, 116(sp)
-; RV32I-NEXT:    lw s0, 120(sp)
-; RV32I-NEXT:    lw ra, 124(sp)
+; RV32I-NEXT:    lw s1, 116(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 128
 ; RV32I-NEXT:    ret
 ;
@@ -38,9 +38,9 @@ define void @caller(i32 %n) {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -128
 ; RV64I-NEXT:    .cfi_def_cfa_offset 128
-; RV64I-NEXT:    sd ra, 120(sp)
-; RV64I-NEXT:    sd s0, 112(sp)
-; RV64I-NEXT:    sd s1, 104(sp)
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    .cfi_offset s0, -16
 ; RV64I-NEXT:    .cfi_offset s1, -24
@@ -58,11 +58,11 @@ define void @caller(i32 %n) {
 ; RV64I-NEXT:    sub a0, sp, a0
 ; RV64I-NEXT:    mv sp, a0
 ; RV64I-NEXT:    addi a1, s1, 64
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    addi sp, s0, -128
-; RV64I-NEXT:    ld s1, 104(sp)
-; RV64I-NEXT:    ld s0, 112(sp)
-; RV64I-NEXT:    ld ra, 120(sp)
+; RV64I-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    ret
   %1 = alloca i8, i32 %n

diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll
index c15e6e14b9ad..f37549eaca57 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll
@@ -10,30 +10,30 @@ define void @caller32() nounwind {
 ; RV32I-LABEL: caller32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -64
-; RV32I-NEXT:    sw ra, 60(sp)
-; RV32I-NEXT:    sw s0, 56(sp)
+; RV32I-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 64
 ; RV32I-NEXT:    andi sp, sp, -32
 ; RV32I-NEXT:    addi a0, sp, 32
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    addi sp, s0, -64
-; RV32I-NEXT:    lw s0, 56(sp)
-; RV32I-NEXT:    lw ra, 60(sp)
+; RV32I-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 64
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -64
-; RV64I-NEXT:    sd ra, 56(sp)
-; RV64I-NEXT:    sd s0, 48(sp)
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 64
 ; RV64I-NEXT:    andi sp, sp, -32
 ; RV64I-NEXT:    addi a0, sp, 32
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    addi sp, s0, -64
-; RV64I-NEXT:    ld s0, 48(sp)
-; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 32
@@ -45,20 +45,20 @@ define void @caller_no_realign32() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 32
@@ -70,30 +70,30 @@ define void @caller64() nounwind {
 ; RV32I-LABEL: caller64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -128
-; RV32I-NEXT:    sw ra, 124(sp)
-; RV32I-NEXT:    sw s0, 120(sp)
+; RV32I-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 128
 ; RV32I-NEXT:    andi sp, sp, -64
 ; RV32I-NEXT:    addi a0, sp, 64
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    addi sp, s0, -128
-; RV32I-NEXT:    lw s0, 120(sp)
-; RV32I-NEXT:    lw ra, 124(sp)
+; RV32I-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 128
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -128
-; RV64I-NEXT:    sd ra, 120(sp)
-; RV64I-NEXT:    sd s0, 112(sp)
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 128
 ; RV64I-NEXT:    andi sp, sp, -64
 ; RV64I-NEXT:    addi a0, sp, 64
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    addi sp, s0, -128
-; RV64I-NEXT:    ld s0, 112(sp)
-; RV64I-NEXT:    ld ra, 120(sp)
+; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 64
@@ -105,20 +105,20 @@ define void @caller_no_realign64() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 64
@@ -130,30 +130,30 @@ define void @caller128() nounwind {
 ; RV32I-LABEL: caller128:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -256
-; RV32I-NEXT:    sw ra, 252(sp)
-; RV32I-NEXT:    sw s0, 248(sp)
+; RV32I-NEXT:    sw ra, 252(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 248(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 256
 ; RV32I-NEXT:    andi sp, sp, -128
 ; RV32I-NEXT:    addi a0, sp, 128
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    addi sp, s0, -256
-; RV32I-NEXT:    lw s0, 248(sp)
-; RV32I-NEXT:    lw ra, 252(sp)
+; RV32I-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 256
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller128:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -256
-; RV64I-NEXT:    sd ra, 248(sp)
-; RV64I-NEXT:    sd s0, 240(sp)
+; RV64I-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 256
 ; RV64I-NEXT:    andi sp, sp, -128
 ; RV64I-NEXT:    addi a0, sp, 128
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    addi sp, s0, -256
-; RV64I-NEXT:    ld s0, 240(sp)
-; RV64I-NEXT:    ld ra, 248(sp)
+; RV64I-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 256
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 128
@@ -165,20 +165,20 @@ define void @caller_no_realign128() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign128:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign128:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 128
@@ -190,30 +190,30 @@ define void @caller256() nounwind {
 ; RV32I-LABEL: caller256:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -512
-; RV32I-NEXT:    sw ra, 508(sp)
-; RV32I-NEXT:    sw s0, 504(sp)
+; RV32I-NEXT:    sw ra, 508(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 504(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 512
 ; RV32I-NEXT:    andi sp, sp, -256
 ; RV32I-NEXT:    addi a0, sp, 256
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    addi sp, s0, -512
-; RV32I-NEXT:    lw s0, 504(sp)
-; RV32I-NEXT:    lw ra, 508(sp)
+; RV32I-NEXT:    lw s0, 504(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 508(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 512
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller256:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -512
-; RV64I-NEXT:    sd ra, 504(sp)
-; RV64I-NEXT:    sd s0, 496(sp)
+; RV64I-NEXT:    sd ra, 504(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 496(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 512
 ; RV64I-NEXT:    andi sp, sp, -256
 ; RV64I-NEXT:    addi a0, sp, 256
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    addi sp, s0, -512
-; RV64I-NEXT:    ld s0, 496(sp)
-; RV64I-NEXT:    ld ra, 504(sp)
+; RV64I-NEXT:    ld s0, 496(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 504(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 512
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 256
@@ -225,20 +225,20 @@ define void @caller_no_realign256() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign256:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign256:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 256
@@ -250,30 +250,30 @@ define void @caller512() nounwind {
 ; RV32I-LABEL: caller512:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -1536
-; RV32I-NEXT:    sw ra, 1532(sp)
-; RV32I-NEXT:    sw s0, 1528(sp)
+; RV32I-NEXT:    sw ra, 1532(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 1528(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 1536
 ; RV32I-NEXT:    andi sp, sp, -512
 ; RV32I-NEXT:    addi a0, sp, 1024
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    addi sp, s0, -1536
-; RV32I-NEXT:    lw s0, 1528(sp)
-; RV32I-NEXT:    lw ra, 1532(sp)
+; RV32I-NEXT:    lw s0, 1528(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 1532(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 1536
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller512:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -1536
-; RV64I-NEXT:    sd ra, 1528(sp)
-; RV64I-NEXT:    sd s0, 1520(sp)
+; RV64I-NEXT:    sd ra, 1528(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 1520(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 1536
 ; RV64I-NEXT:    andi sp, sp, -512
 ; RV64I-NEXT:    addi a0, sp, 1024
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    addi sp, s0, -1536
-; RV64I-NEXT:    ld s0, 1520(sp)
-; RV64I-NEXT:    ld ra, 1528(sp)
+; RV64I-NEXT:    ld s0, 1520(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 1528(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 1536
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 512
@@ -285,20 +285,20 @@ define void @caller_no_realign512() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign512:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign512:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 512
@@ -310,8 +310,8 @@ define void @caller1024() nounwind {
 ; RV32I-LABEL: caller1024:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -2032
-; RV32I-NEXT:    sw ra, 2028(sp)
-; RV32I-NEXT:    sw s0, 2024(sp)
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 2032
 ; RV32I-NEXT:    addi sp, sp, -1040
 ; RV32I-NEXT:    andi sp, sp, -1024
@@ -319,21 +319,21 @@ define void @caller1024() nounwind {
 ; RV32I-NEXT:    addi a0, a0, -2048
 ; RV32I-NEXT:    add a0, sp, a0
 ; RV32I-NEXT:    mv a0, a0
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -1024
 ; RV32I-NEXT:    sub sp, s0, a0
 ; RV32I-NEXT:    addi sp, sp, 1040
-; RV32I-NEXT:    lw s0, 2024(sp)
-; RV32I-NEXT:    lw ra, 2028(sp)
+; RV32I-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller1024:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -2032
-; RV64I-NEXT:    sd ra, 2024(sp)
-; RV64I-NEXT:    sd s0, 2016(sp)
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 2032
 ; RV64I-NEXT:    addi sp, sp, -1040
 ; RV64I-NEXT:    andi sp, sp, -1024
@@ -341,13 +341,13 @@ define void @caller1024() nounwind {
 ; RV64I-NEXT:    addiw a0, a0, -2048
 ; RV64I-NEXT:    add a0, sp, a0
 ; RV64I-NEXT:    mv a0, a0
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -1024
 ; RV64I-NEXT:    sub sp, s0, a0
 ; RV64I-NEXT:    addi sp, sp, 1040
-; RV64I-NEXT:    ld s0, 2016(sp)
-; RV64I-NEXT:    ld ra, 2024(sp)
+; RV64I-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 1024
@@ -359,20 +359,20 @@ define void @caller_no_realign1024() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign1024:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign1024:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 1024
@@ -384,8 +384,8 @@ define void @caller2048() nounwind {
 ; RV32I-LABEL: caller2048:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -2032
-; RV32I-NEXT:    sw ra, 2028(sp)
-; RV32I-NEXT:    sw s0, 2024(sp)
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 2032
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, 16
@@ -394,23 +394,23 @@ define void @caller2048() nounwind {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    add a0, sp, a0
 ; RV32I-NEXT:    mv a0, a0
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    lui a0, 2
 ; RV32I-NEXT:    addi a0, a0, -2048
 ; RV32I-NEXT:    sub sp, s0, a0
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, 16
 ; RV32I-NEXT:    add sp, sp, a0
-; RV32I-NEXT:    lw s0, 2024(sp)
-; RV32I-NEXT:    lw ra, 2028(sp)
+; RV32I-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller2048:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -2032
-; RV64I-NEXT:    sd ra, 2024(sp)
-; RV64I-NEXT:    sd s0, 2016(sp)
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 2032
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, 16
@@ -419,15 +419,15 @@ define void @caller2048() nounwind {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    add a0, sp, a0
 ; RV64I-NEXT:    mv a0, a0
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    lui a0, 2
 ; RV64I-NEXT:    addiw a0, a0, -2048
 ; RV64I-NEXT:    sub sp, s0, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, 16
 ; RV64I-NEXT:    add sp, sp, a0
-; RV64I-NEXT:    ld s0, 2016(sp)
-; RV64I-NEXT:    ld ra, 2024(sp)
+; RV64I-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 2048
@@ -439,20 +439,20 @@ define void @caller_no_realign2048() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign2048:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign2048:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee at plt
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 2048
@@ -464,8 +464,8 @@ define void @caller4096() nounwind {
 ; RV32I-LABEL: caller4096:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -2032
-; RV32I-NEXT:    sw ra, 2028(sp)
-; RV32I-NEXT:    sw s0, 2024(sp)
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi s0, sp, 2032
 ; RV32I-NEXT:    lui a0, 3
 ; RV32I-NEXT:    addi a0, a0, -2032
@@ -475,22 +475,22 @@ define void @caller4096() nounwind {
 ; RV32I-NEXT:    lui a0, 2
 ; RV32I-NEXT:    add a0, sp, a0
 ; RV32I-NEXT:    mv a0, a0
-; RV32I-NEXT:    call callee
+; RV32I-NEXT:    call callee@plt
 ; RV32I-NEXT:    lui a0, 3
 ; RV32I-NEXT:    sub sp, s0, a0
 ; RV32I-NEXT:    lui a0, 3
 ; RV32I-NEXT:    addi a0, a0, -2032
 ; RV32I-NEXT:    add sp, sp, a0
-; RV32I-NEXT:    lw s0, 2024(sp)
-; RV32I-NEXT:    lw ra, 2028(sp)
+; RV32I-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller4096:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -2032
-; RV64I-NEXT:    sd ra, 2024(sp)
-; RV64I-NEXT:    sd s0, 2016(sp)
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi s0, sp, 2032
 ; RV64I-NEXT:    lui a0, 3
 ; RV64I-NEXT:    addiw a0, a0, -2032
@@ -500,14 +500,14 @@ define void @caller4096() nounwind {
 ; RV64I-NEXT:    lui a0, 2
 ; RV64I-NEXT:    add a0, sp, a0
 ; RV64I-NEXT:    mv a0, a0
-; RV64I-NEXT:    call callee
+; RV64I-NEXT:    call callee@plt
 ; RV64I-NEXT:    lui a0, 3
 ; RV64I-NEXT:    sub sp, s0, a0
 ; RV64I-NEXT:    lui a0, 3
 ; RV64I-NEXT:    addiw a0, a0, -2032
 ; RV64I-NEXT:    add sp, sp, a0
-; RV64I-NEXT:    ld s0, 2016(sp)
-; RV64I-NEXT:    ld ra, 2024(sp)
+; RV64I-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 2032
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 4096
@@ -519,20 +519,20 @@ define void @caller_no_realign4096() nounwind "no-realign-stack" {
 ; RV32I-LABEL: caller_no_realign4096:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a0, sp
-; RV32I-NEXT:    call callee
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call callee@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: caller_no_realign4096:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a0, sp
-; RV64I-NEXT:    call callee
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call callee@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = alloca i8, align 4096

diff --git a/llvm/test/CodeGen/RISCV/stack-store-check.ll b/llvm/test/CodeGen/RISCV/stack-store-check.ll
index c8f733bd6ce9..d9770a94e9f0 100644
--- a/llvm/test/CodeGen/RISCV/stack-store-check.ll
+++ b/llvm/test/CodeGen/RISCV/stack-store-check.ll
@@ -15,19 +15,19 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-LABEL: main:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -688
-; CHECK-NEXT:    sw ra, 684(sp)
-; CHECK-NEXT:    sw s0, 680(sp)
-; CHECK-NEXT:    sw s1, 676(sp)
-; CHECK-NEXT:    sw s2, 672(sp)
-; CHECK-NEXT:    sw s3, 668(sp)
-; CHECK-NEXT:    sw s4, 664(sp)
-; CHECK-NEXT:    sw s5, 660(sp)
-; CHECK-NEXT:    sw s6, 656(sp)
-; CHECK-NEXT:    sw s7, 652(sp)
-; CHECK-NEXT:    sw s8, 648(sp)
-; CHECK-NEXT:    sw s9, 644(sp)
-; CHECK-NEXT:    sw s10, 640(sp)
-; CHECK-NEXT:    sw s11, 636(sp)
+; CHECK-NEXT:    sw ra, 684(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 680(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 676(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 672(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 668(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 664(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 660(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 656(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 652(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 648(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 644(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 640(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 636(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lui a0, %hi(U)
 ; CHECK-NEXT:    lw s6, %lo(U)(a0)
 ; CHECK-NEXT:    lw s7, %lo(U+4)(a0)
@@ -44,7 +44,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 600
 ; CHECK-NEXT:    addi a2, sp, 584
 ; CHECK-NEXT:    sw s6, 584(sp)
-; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    call __subtf3@plt
 ; CHECK-NEXT:    lw s3, 616(sp)
 ; CHECK-NEXT:    lw s4, 620(sp)
 ; CHECK-NEXT:    lw s9, 624(sp)
@@ -60,15 +60,15 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 552
 ; CHECK-NEXT:    addi a2, sp, 536
 ; CHECK-NEXT:    sw s3, 552(sp)
-; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    call __subtf3@plt
 ; CHECK-NEXT:    lw a0, 568(sp)
-; CHECK-NEXT:    sw a0, 40(sp)
+; CHECK-NEXT:    sw a0, 40(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 572(sp)
-; CHECK-NEXT:    sw a0, 32(sp)
+; CHECK-NEXT:    sw a0, 32(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 576(sp)
-; CHECK-NEXT:    sw a0, 24(sp)
+; CHECK-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 580(sp)
-; CHECK-NEXT:    sw a0, 16(sp)
+; CHECK-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    sw zero, 500(sp)
 ; CHECK-NEXT:    sw zero, 496(sp)
 ; CHECK-NEXT:    sw zero, 492(sp)
@@ -80,21 +80,21 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 504
 ; CHECK-NEXT:    addi a2, sp, 488
 ; CHECK-NEXT:    sw s6, 504(sp)
-; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    call __addtf3@plt
 ; CHECK-NEXT:    lw s2, 520(sp)
 ; CHECK-NEXT:    lw s10, 524(sp)
 ; CHECK-NEXT:    lw s5, 528(sp)
 ; CHECK-NEXT:    lw s1, 532(sp)
-; CHECK-NEXT:    sw s1, 8(sp)
+; CHECK-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lui a0, %hi(Y1)
 ; CHECK-NEXT:    lw a1, %lo(Y1)(a0)
-; CHECK-NEXT:    sw a1, 48(sp)
+; CHECK-NEXT:    sw a1, 48(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a2, %lo(Y1+4)(a0)
-; CHECK-NEXT:    sw a2, 52(sp)
+; CHECK-NEXT:    sw a2, 52(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a3, %lo(Y1+8)(a0)
-; CHECK-NEXT:    sw a3, 4(sp)
+; CHECK-NEXT:    sw a3, 4(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, %lo(Y1+12)(a0)
-; CHECK-NEXT:    sw a0, 0(sp)
+; CHECK-NEXT:    sw a0, 0(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    sw a0, 308(sp)
 ; CHECK-NEXT:    sw a3, 304(sp)
 ; CHECK-NEXT:    sw a2, 300(sp)
@@ -106,15 +106,15 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 312
 ; CHECK-NEXT:    addi a2, sp, 296
 ; CHECK-NEXT:    sw s3, 312(sp)
-; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    call __multf3@plt
 ; CHECK-NEXT:    lw a0, 328(sp)
-; CHECK-NEXT:    sw a0, 44(sp)
+; CHECK-NEXT:    sw a0, 44(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 332(sp)
-; CHECK-NEXT:    sw a0, 36(sp)
+; CHECK-NEXT:    sw a0, 36(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 336(sp)
-; CHECK-NEXT:    sw a0, 28(sp)
+; CHECK-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw a0, 340(sp)
-; CHECK-NEXT:    sw a0, 20(sp)
+; CHECK-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    sw s0, 468(sp)
 ; CHECK-NEXT:    sw s8, 464(sp)
 ; CHECK-NEXT:    sw s7, 460(sp)
@@ -126,7 +126,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 456
 ; CHECK-NEXT:    addi a2, sp, 440
 ; CHECK-NEXT:    sw s2, 440(sp)
-; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    call __addtf3@plt
 ; CHECK-NEXT:    lw a3, 472(sp)
 ; CHECK-NEXT:    lw a0, 476(sp)
 ; CHECK-NEXT:    lw a1, 480(sp)
@@ -142,7 +142,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 408
 ; CHECK-NEXT:    addi a2, sp, 392
 ; CHECK-NEXT:    sw a3, 392(sp)
-; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    call __subtf3@plt
 ; CHECK-NEXT:    lw a0, 424(sp)
 ; CHECK-NEXT:    lw a1, 436(sp)
 ; CHECK-NEXT:    lw a2, 432(sp)
@@ -152,28 +152,28 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    sw a2, %lo(X+8)(a4)
 ; CHECK-NEXT:    sw a3, %lo(X+4)(a4)
 ; CHECK-NEXT:    sw a0, %lo(X)(a4)
-; CHECK-NEXT:    lw s8, 0(sp)
+; CHECK-NEXT:    lw s8, 0(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw s8, 212(sp)
-; CHECK-NEXT:    lw s7, 4(sp)
+; CHECK-NEXT:    lw s7, 4(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw s7, 208(sp)
-; CHECK-NEXT:    lw a0, 52(sp)
+; CHECK-NEXT:    lw a0, 52(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a0, 204(sp)
-; CHECK-NEXT:    lw a0, 48(sp)
+; CHECK-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a0, 200(sp)
-; CHECK-NEXT:    lw s6, 16(sp)
+; CHECK-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw s6, 228(sp)
-; CHECK-NEXT:    lw s4, 24(sp)
+; CHECK-NEXT:    lw s4, 24(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw s4, 224(sp)
-; CHECK-NEXT:    lw s0, 32(sp)
+; CHECK-NEXT:    lw s0, 32(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw s0, 220(sp)
 ; CHECK-NEXT:    addi a0, sp, 232
 ; CHECK-NEXT:    addi a1, sp, 216
 ; CHECK-NEXT:    addi a2, sp, 200
-; CHECK-NEXT:    lw s1, 40(sp)
+; CHECK-NEXT:    lw s1, 40(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw s1, 216(sp)
-; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    call __multf3@plt
 ; CHECK-NEXT:    lw a0, 232(sp)
-; CHECK-NEXT:    sw a0, 12(sp)
+; CHECK-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lw s3, 236(sp)
 ; CHECK-NEXT:    lw s9, 240(sp)
 ; CHECK-NEXT:    lw s11, 244(sp)
@@ -181,7 +181,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    sw zero, 352(sp)
 ; CHECK-NEXT:    sw zero, 348(sp)
 ; CHECK-NEXT:    sw zero, 344(sp)
-; CHECK-NEXT:    lw a0, 8(sp)
+; CHECK-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a0, 372(sp)
 ; CHECK-NEXT:    sw s5, 368(sp)
 ; CHECK-NEXT:    sw s10, 364(sp)
@@ -189,7 +189,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 360
 ; CHECK-NEXT:    addi a2, sp, 344
 ; CHECK-NEXT:    sw s2, 360(sp)
-; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    call __multf3@plt
 ; CHECK-NEXT:    lw a0, 376(sp)
 ; CHECK-NEXT:    lw a1, 388(sp)
 ; CHECK-NEXT:    lw a2, 384(sp)
@@ -203,18 +203,18 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    sw s4, 256(sp)
 ; CHECK-NEXT:    sw s0, 252(sp)
 ; CHECK-NEXT:    sw s1, 248(sp)
-; CHECK-NEXT:    lw a0, 20(sp)
+; CHECK-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a0, 276(sp)
-; CHECK-NEXT:    lw a0, 28(sp)
+; CHECK-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a0, 272(sp)
-; CHECK-NEXT:    lw a0, 36(sp)
+; CHECK-NEXT:    lw a0, 36(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a0, 268(sp)
 ; CHECK-NEXT:    addi a0, sp, 280
 ; CHECK-NEXT:    addi a1, sp, 264
 ; CHECK-NEXT:    addi a2, sp, 248
-; CHECK-NEXT:    lw a3, 44(sp)
+; CHECK-NEXT:    lw a3, 44(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a3, 264(sp)
-; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    call __subtf3@plt
 ; CHECK-NEXT:    lw a0, 280(sp)
 ; CHECK-NEXT:    lw a1, 292(sp)
 ; CHECK-NEXT:    lw a2, 288(sp)
@@ -234,9 +234,9 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a0, sp, 184
 ; CHECK-NEXT:    addi a1, sp, 168
 ; CHECK-NEXT:    addi a2, sp, 152
-; CHECK-NEXT:    lw a3, 12(sp)
+; CHECK-NEXT:    lw a3, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a3, 168(sp)
-; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    call __addtf3@plt
 ; CHECK-NEXT:    lw a0, 184(sp)
 ; CHECK-NEXT:    lw a1, 196(sp)
 ; CHECK-NEXT:    lw a2, 192(sp)
@@ -252,14 +252,14 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    sw zero, 104(sp)
 ; CHECK-NEXT:    sw s8, 132(sp)
 ; CHECK-NEXT:    sw s7, 128(sp)
-; CHECK-NEXT:    lw a0, 52(sp)
+; CHECK-NEXT:    lw a0, 52(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a0, 124(sp)
 ; CHECK-NEXT:    addi a0, sp, 136
 ; CHECK-NEXT:    addi a1, sp, 120
 ; CHECK-NEXT:    addi a2, sp, 104
-; CHECK-NEXT:    lw a3, 48(sp)
+; CHECK-NEXT:    lw a3, 48(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    sw a3, 120(sp)
-; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    call __multf3@plt
 ; CHECK-NEXT:    lw a3, 136(sp)
 ; CHECK-NEXT:    lw a0, 140(sp)
 ; CHECK-NEXT:    lw a1, 144(sp)
@@ -276,7 +276,7 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    addi a1, sp, 72
 ; CHECK-NEXT:    addi a2, sp, 56
 ; CHECK-NEXT:    sw a3, 72(sp)
-; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    call __addtf3@plt
 ; CHECK-NEXT:    lw a0, 96(sp)
 ; CHECK-NEXT:    lw a1, 100(sp)
 ; CHECK-NEXT:    lw a2, 88(sp)
@@ -286,19 +286,19 @@ define void @main() local_unnamed_addr nounwind {
 ; CHECK-NEXT:    sw a1, %lo(Y1+12)(a4)
 ; CHECK-NEXT:    sw a2, %lo(Y1)(a4)
 ; CHECK-NEXT:    sw a3, %lo(Y1+4)(a4)
-; CHECK-NEXT:    lw s11, 636(sp)
-; CHECK-NEXT:    lw s10, 640(sp)
-; CHECK-NEXT:    lw s9, 644(sp)
-; CHECK-NEXT:    lw s8, 648(sp)
-; CHECK-NEXT:    lw s7, 652(sp)
-; CHECK-NEXT:    lw s6, 656(sp)
-; CHECK-NEXT:    lw s5, 660(sp)
-; CHECK-NEXT:    lw s4, 664(sp)
-; CHECK-NEXT:    lw s3, 668(sp)
-; CHECK-NEXT:    lw s2, 672(sp)
-; CHECK-NEXT:    lw s1, 676(sp)
-; CHECK-NEXT:    lw s0, 680(sp)
-; CHECK-NEXT:    lw ra, 684(sp)
+; CHECK-NEXT:    lw s11, 636(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 640(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 644(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 648(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 652(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 656(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 660(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 664(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 668(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 672(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 676(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 680(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw ra, 684(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 688
 ; CHECK-NEXT:    ret
   %1 = load fp128, fp128* @U, align 16

diff --git a/llvm/test/CodeGen/RISCV/tls-models.ll b/llvm/test/CodeGen/RISCV/tls-models.ll
index 25a2f71beb31..e3836eefad7c 100644
--- a/llvm/test/CodeGen/RISCV/tls-models.ll
+++ b/llvm/test/CodeGen/RISCV/tls-models.ll
@@ -22,26 +22,26 @@ define i32* @f1() nounwind {
 ; RV32-PIC-LABEL: f1:
 ; RV32-PIC:       # %bb.0: # %entry
 ; RV32-PIC-NEXT:    addi sp, sp, -16
-; RV32-PIC-NEXT:    sw ra, 12(sp)
+; RV32-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-PIC-NEXT:  .LBB0_1: # %entry
 ; RV32-PIC-NEXT:    # Label of block must be emitted
 ; RV32-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(unspecified)
 ; RV32-PIC-NEXT:    addi a0, a0, %pcrel_lo(.LBB0_1)
 ; RV32-PIC-NEXT:    call __tls_get_addr@plt
-; RV32-PIC-NEXT:    lw ra, 12(sp)
+; RV32-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-PIC-NEXT:    addi sp, sp, 16
 ; RV32-PIC-NEXT:    ret
 ;
 ; RV64-PIC-LABEL: f1:
 ; RV64-PIC:       # %bb.0: # %entry
 ; RV64-PIC-NEXT:    addi sp, sp, -16
-; RV64-PIC-NEXT:    sd ra, 8(sp)
+; RV64-PIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-PIC-NEXT:  .LBB0_1: # %entry
 ; RV64-PIC-NEXT:    # Label of block must be emitted
 ; RV64-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(unspecified)
 ; RV64-PIC-NEXT:    addi a0, a0, %pcrel_lo(.LBB0_1)
 ; RV64-PIC-NEXT:    call __tls_get_addr@plt
-; RV64-PIC-NEXT:    ld ra, 8(sp)
+; RV64-PIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-PIC-NEXT:    addi sp, sp, 16
 ; RV64-PIC-NEXT:    ret
 ;
@@ -73,26 +73,26 @@ define i32* @f2() nounwind {
 ; RV32-PIC-LABEL: f2:
 ; RV32-PIC:       # %bb.0: # %entry
 ; RV32-PIC-NEXT:    addi sp, sp, -16
-; RV32-PIC-NEXT:    sw ra, 12(sp)
+; RV32-PIC-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-PIC-NEXT:  .LBB1_1: # %entry
 ; RV32-PIC-NEXT:    # Label of block must be emitted
 ; RV32-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(ld)
 ; RV32-PIC-NEXT:    addi a0, a0, %pcrel_lo(.LBB1_1)
 ; RV32-PIC-NEXT:    call __tls_get_addr@plt
-; RV32-PIC-NEXT:    lw ra, 12(sp)
+; RV32-PIC-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-PIC-NEXT:    addi sp, sp, 16
 ; RV32-PIC-NEXT:    ret
 ;
 ; RV64-PIC-LABEL: f2:
 ; RV64-PIC:       # %bb.0: # %entry
 ; RV64-PIC-NEXT:    addi sp, sp, -16
-; RV64-PIC-NEXT:    sd ra, 8(sp)
+; RV64-PIC-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-PIC-NEXT:  .LBB1_1: # %entry
 ; RV64-PIC-NEXT:    # Label of block must be emitted
 ; RV64-PIC-NEXT:    auipc a0, %tls_gd_pcrel_hi(ld)
 ; RV64-PIC-NEXT:    addi a0, a0, %pcrel_lo(.LBB1_1)
 ; RV64-PIC-NEXT:    call __tls_get_addr@plt
-; RV64-PIC-NEXT:    ld ra, 8(sp)
+; RV64-PIC-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-PIC-NEXT:    addi sp, sp, 16
 ; RV64-PIC-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
index 30346a134a78..558548d8e16e 100644
--- a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
@@ -5,16 +5,16 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) #0 {
 ; RISCV32-LABEL: muloti_test:
 ; RISCV32:       # %bb.0: # %start
 ; RISCV32-NEXT:    addi sp, sp, -96
-; RISCV32-NEXT:    sw ra, 92(sp)
-; RISCV32-NEXT:    sw s0, 88(sp)
-; RISCV32-NEXT:    sw s1, 84(sp)
-; RISCV32-NEXT:    sw s2, 80(sp)
-; RISCV32-NEXT:    sw s3, 76(sp)
-; RISCV32-NEXT:    sw s4, 72(sp)
-; RISCV32-NEXT:    sw s5, 68(sp)
-; RISCV32-NEXT:    sw s6, 64(sp)
-; RISCV32-NEXT:    sw s7, 60(sp)
-; RISCV32-NEXT:    sw s8, 56(sp)
+; RISCV32-NEXT:    sw ra, 92(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s0, 88(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s1, 84(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s2, 80(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s3, 76(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s4, 72(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s5, 68(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s6, 64(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s7, 60(sp) # 4-byte Folded Spill
+; RISCV32-NEXT:    sw s8, 56(sp) # 4-byte Folded Spill
 ; RISCV32-NEXT:    lw s2, 12(a1)
 ; RISCV32-NEXT:    lw s6, 8(a1)
 ; RISCV32-NEXT:    lw s3, 12(a2)
@@ -35,7 +35,7 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) #0 {
 ; RISCV32-NEXT:    addi a1, sp, 24
 ; RISCV32-NEXT:    addi a2, sp, 8
 ; RISCV32-NEXT:    sw s0, 24(sp)
-; RISCV32-NEXT:    call __multi3
+; RISCV32-NEXT:    call __multi3@plt
 ; RISCV32-NEXT:    mul a0, s8, s7
 ; RISCV32-NEXT:    mul a1, s3, s0
 ; RISCV32-NEXT:    add a0, a1, a0
@@ -100,16 +100,16 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) #0 {
 ; RISCV32-NEXT:    sw a6, 8(s4)
 ; RISCV32-NEXT:    sw a7, 12(s4)
 ; RISCV32-NEXT:    sb a0, 16(s4)
-; RISCV32-NEXT:    lw s8, 56(sp)
-; RISCV32-NEXT:    lw s7, 60(sp)
-; RISCV32-NEXT:    lw s6, 64(sp)
-; RISCV32-NEXT:    lw s5, 68(sp)
-; RISCV32-NEXT:    lw s4, 72(sp)
-; RISCV32-NEXT:    lw s3, 76(sp)
-; RISCV32-NEXT:    lw s2, 80(sp)
-; RISCV32-NEXT:    lw s1, 84(sp)
-; RISCV32-NEXT:    lw s0, 88(sp)
-; RISCV32-NEXT:    lw ra, 92(sp)
+; RISCV32-NEXT:    lw s8, 56(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s7, 60(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s6, 64(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s5, 68(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s4, 72(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s3, 76(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s2, 80(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s1, 84(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw s0, 88(sp) # 4-byte Folded Reload
+; RISCV32-NEXT:    lw ra, 92(sp) # 4-byte Folded Reload
 ; RISCV32-NEXT:    addi sp, sp, 96
 ; RISCV32-NEXT:    ret
 start:

diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index 5286ad507860..4d9ad2527cbb 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -12,10 +12,10 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_odd:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 95
-; RV32I-NEXT:    call __umodsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -36,12 +36,12 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV64I-LABEL: fold_urem_positive_odd:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    addi a1, zero, 95
-; RV64I-NEXT:    call __umoddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -75,10 +75,10 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_even:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a1, zero, 1060
-; RV32I-NEXT:    call __umodsi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -96,12 +96,12 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV64I-LABEL: fold_urem_positive_even:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    addi a1, zero, 1060
-; RV64I-NEXT:    call __umoddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -133,20 +133,20 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV32I-LABEL: combine_urem_udiv:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    sw s0, 8(sp)
-; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __udivsi3
+; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    add a0, s1, a0
-; RV32I-NEXT:    lw s1, 4(sp)
-; RV32I-NEXT:    lw s0, 8(sp)
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -168,22 +168,22 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV64I-LABEL: combine_urem_udiv:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    sd ra, 24(sp)
-; RV64I-NEXT:    sd s0, 16(sp)
-; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli s0, a0, 32
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    add a0, s1, a0
-; RV64I-NEXT:    ld s1, 8(sp)
-; RV64I-NEXT:    ld s0, 16(sp)
-; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
@@ -249,32 +249,32 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV32I-LABEL: dont_fold_urem_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a2, zero, 98
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __umoddi3
-; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: dont_fold_urem_i64:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    sw ra, 12(sp)
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    addi a2, zero, 98
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __umoddi3
-; RV32IM-NEXT:    lw ra, 12(sp)
+; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: dont_fold_urem_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, zero, 98
-; RV64I-NEXT:    call __umoddi3
-; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 0ae2575da83e..e91b0834e7d2 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -13,13 +13,13 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: fold_urem_vec_1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lhu s2, 12(a1)
 ; RV32I-NEXT:    lhu s3, 8(a1)
 ; RV32I-NEXT:    lhu s0, 4(a1)
@@ -27,30 +27,30 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, zero, 124
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    addi a1, zero, 98
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 1003
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s1)
 ; RV32I-NEXT:    sh s0, 4(s1)
 ; RV32I-NEXT:    sh s5, 2(s1)
 ; RV32I-NEXT:    sh s4, 0(s1)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -101,13 +101,13 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: fold_urem_vec_1:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -64
-; RV64I-NEXT:    sd ra, 56(sp)
-; RV64I-NEXT:    sd s0, 48(sp)
-; RV64I-NEXT:    sd s1, 40(sp)
-; RV64I-NEXT:    sd s2, 32(sp)
-; RV64I-NEXT:    sd s3, 24(sp)
-; RV64I-NEXT:    sd s4, 16(sp)
-; RV64I-NEXT:    sd s5, 8(sp)
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lhu s2, 24(a1)
 ; RV64I-NEXT:    lhu s3, 16(a1)
 ; RV64I-NEXT:    lhu s0, 8(a1)
@@ -115,30 +115,30 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, zero, 124
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    addi a1, zero, 98
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 1003
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s1)
 ; RV64I-NEXT:    sh s0, 4(s1)
 ; RV64I-NEXT:    sh s5, 2(s1)
 ; RV64I-NEXT:    sh s4, 0(s1)
-; RV64I-NEXT:    ld s5, 8(sp)
-; RV64I-NEXT:    ld s4, 16(sp)
-; RV64I-NEXT:    ld s3, 24(sp)
-; RV64I-NEXT:    ld s2, 32(sp)
-; RV64I-NEXT:    ld s1, 40(sp)
-; RV64I-NEXT:    ld s0, 48(sp)
-; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
 ; RV64I-NEXT:    ret
 ;
@@ -218,13 +218,13 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: fold_urem_vec_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
-; RV32I-NEXT:    sw s4, 8(sp)
-; RV32I-NEXT:    sw s5, 4(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lhu s2, 12(a1)
 ; RV32I-NEXT:    lhu s3, 8(a1)
 ; RV32I-NEXT:    lhu s0, 4(a1)
@@ -232,30 +232,30 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s1)
 ; RV32I-NEXT:    sh s0, 4(s1)
 ; RV32I-NEXT:    sh s5, 2(s1)
 ; RV32I-NEXT:    sh s4, 0(s1)
-; RV32I-NEXT:    lw s5, 4(sp)
-; RV32I-NEXT:    lw s4, 8(sp)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -305,13 +305,13 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: fold_urem_vec_2:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -64
-; RV64I-NEXT:    sd ra, 56(sp)
-; RV64I-NEXT:    sd s0, 48(sp)
-; RV64I-NEXT:    sd s1, 40(sp)
-; RV64I-NEXT:    sd s2, 32(sp)
-; RV64I-NEXT:    sd s3, 24(sp)
-; RV64I-NEXT:    sd s4, 16(sp)
-; RV64I-NEXT:    sd s5, 8(sp)
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lhu s2, 24(a1)
 ; RV64I-NEXT:    lhu s3, 16(a1)
 ; RV64I-NEXT:    lhu s0, 8(a1)
@@ -319,30 +319,30 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s1)
 ; RV64I-NEXT:    sh s0, 4(s1)
 ; RV64I-NEXT:    sh s5, 2(s1)
 ; RV64I-NEXT:    sh s4, 0(s1)
-; RV64I-NEXT:    ld s5, 8(sp)
-; RV64I-NEXT:    ld s4, 16(sp)
-; RV64I-NEXT:    ld s3, 24(sp)
-; RV64I-NEXT:    ld s2, 32(sp)
-; RV64I-NEXT:    ld s1, 40(sp)
-; RV64I-NEXT:    ld s0, 48(sp)
-; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
 ; RV64I-NEXT:    ret
 ;
@@ -404,17 +404,17 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: combine_urem_udiv:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
-; RV32I-NEXT:    sw s1, 36(sp)
-; RV32I-NEXT:    sw s2, 32(sp)
-; RV32I-NEXT:    sw s3, 28(sp)
-; RV32I-NEXT:    sw s4, 24(sp)
-; RV32I-NEXT:    sw s5, 20(sp)
-; RV32I-NEXT:    sw s6, 16(sp)
-; RV32I-NEXT:    sw s7, 12(sp)
-; RV32I-NEXT:    sw s8, 8(sp)
-; RV32I-NEXT:    sw s9, 4(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s9, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lhu s2, 0(a1)
 ; RV32I-NEXT:    lhu s3, 4(a1)
 ; RV32I-NEXT:    lhu s4, 8(a1)
@@ -422,35 +422,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s8, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __udivsi3
+; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    mv s9, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s4
-; RV32I-NEXT:    call __udivsi3
+; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    call __udivsi3
+; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __udivsi3
+; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    add a0, s8, a0
 ; RV32I-NEXT:    add a1, s7, s1
 ; RV32I-NEXT:    add a2, s6, s4
@@ -459,17 +459,17 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    sh a2, 4(s0)
 ; RV32I-NEXT:    sh a1, 2(s0)
 ; RV32I-NEXT:    sh a0, 0(s0)
-; RV32I-NEXT:    lw s9, 4(sp)
-; RV32I-NEXT:    lw s8, 8(sp)
-; RV32I-NEXT:    lw s7, 12(sp)
-; RV32I-NEXT:    lw s6, 16(sp)
-; RV32I-NEXT:    lw s5, 20(sp)
-; RV32I-NEXT:    lw s4, 24(sp)
-; RV32I-NEXT:    lw s3, 28(sp)
-; RV32I-NEXT:    lw s2, 32(sp)
-; RV32I-NEXT:    lw s1, 36(sp)
-; RV32I-NEXT:    lw s0, 40(sp)
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw s9, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
 ;
@@ -523,17 +523,17 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: combine_urem_udiv:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -96
-; RV64I-NEXT:    sd ra, 88(sp)
-; RV64I-NEXT:    sd s0, 80(sp)
-; RV64I-NEXT:    sd s1, 72(sp)
-; RV64I-NEXT:    sd s2, 64(sp)
-; RV64I-NEXT:    sd s3, 56(sp)
-; RV64I-NEXT:    sd s4, 48(sp)
-; RV64I-NEXT:    sd s5, 40(sp)
-; RV64I-NEXT:    sd s6, 32(sp)
-; RV64I-NEXT:    sd s7, 24(sp)
-; RV64I-NEXT:    sd s8, 16(sp)
-; RV64I-NEXT:    sd s9, 8(sp)
+; RV64I-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s6, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s7, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s8, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s9, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lhu s2, 0(a1)
 ; RV64I-NEXT:    lhu s3, 8(a1)
 ; RV64I-NEXT:    lhu s4, 16(a1)
@@ -541,35 +541,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s5, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s6, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s7, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s8, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    mv s9, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s4
-; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __udivdi3
+; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    add a0, s8, a0
 ; RV64I-NEXT:    add a1, s7, s1
 ; RV64I-NEXT:    add a2, s6, s4
@@ -578,17 +578,17 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    sh a2, 4(s0)
 ; RV64I-NEXT:    sh a1, 2(s0)
 ; RV64I-NEXT:    sh a0, 0(s0)
-; RV64I-NEXT:    ld s9, 8(sp)
-; RV64I-NEXT:    ld s8, 16(sp)
-; RV64I-NEXT:    ld s7, 24(sp)
-; RV64I-NEXT:    ld s6, 32(sp)
-; RV64I-NEXT:    ld s5, 40(sp)
-; RV64I-NEXT:    ld s4, 48(sp)
-; RV64I-NEXT:    ld s3, 56(sp)
-; RV64I-NEXT:    ld s2, 64(sp)
-; RV64I-NEXT:    ld s1, 72(sp)
-; RV64I-NEXT:    ld s0, 80(sp)
-; RV64I-NEXT:    ld ra, 88(sp)
+; RV64I-NEXT:    ld s9, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s8, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s7, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s6, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 96
 ; RV64I-NEXT:    ret
 ;
@@ -655,11 +655,11 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: dont_fold_urem_power_of_two:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lhu s2, 8(a1)
 ; RV32I-NEXT:    lhu s3, 4(a1)
 ; RV32I-NEXT:    lhu s1, 0(a1)
@@ -667,7 +667,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    andi a1, s1, 63
 ; RV32I-NEXT:    andi a2, s3, 31
 ; RV32I-NEXT:    andi a3, s2, 7
@@ -675,11 +675,11 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    sh a3, 4(s0)
 ; RV32I-NEXT:    sh a2, 2(s0)
 ; RV32I-NEXT:    sh a1, 0(s0)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -711,11 +711,11 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: dont_fold_urem_power_of_two:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lhu s2, 16(a1)
 ; RV64I-NEXT:    lhu s3, 8(a1)
 ; RV64I-NEXT:    lhu s1, 0(a1)
@@ -723,7 +723,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    andi a1, s1, 63
 ; RV64I-NEXT:    andi a2, s3, 31
 ; RV64I-NEXT:    andi a3, s2, 7
@@ -731,11 +731,11 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    sh a3, 4(s0)
 ; RV64I-NEXT:    sh a2, 2(s0)
 ; RV64I-NEXT:    sh a1, 0(s0)
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -778,36 +778,36 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV32I-LABEL: dont_fold_urem_one:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp)
-; RV32I-NEXT:    sw s0, 24(sp)
-; RV32I-NEXT:    sw s1, 20(sp)
-; RV32I-NEXT:    sw s2, 16(sp)
-; RV32I-NEXT:    sw s3, 12(sp)
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lhu s2, 12(a1)
 ; RV32I-NEXT:    lhu s1, 8(a1)
 ; RV32I-NEXT:    lhu a2, 4(a1)
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 654
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    addi a1, zero, 23
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a1, a0, 1327
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    call __umodsi3
+; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    sh zero, 0(s0)
 ; RV32I-NEXT:    sh a0, 6(s0)
 ; RV32I-NEXT:    sh s1, 4(s0)
 ; RV32I-NEXT:    sh s3, 2(s0)
-; RV32I-NEXT:    lw s3, 12(sp)
-; RV32I-NEXT:    lw s2, 16(sp)
-; RV32I-NEXT:    lw s1, 20(sp)
-; RV32I-NEXT:    lw s0, 24(sp)
-; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -848,36 +848,36 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64I-LABEL: dont_fold_urem_one:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lhu s2, 24(a1)
 ; RV64I-NEXT:    lhu s1, 16(a1)
 ; RV64I-NEXT:    lhu a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    addi a1, zero, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    sh zero, 0(s0)
 ; RV64I-NEXT:    sh a0, 6(s0)
 ; RV64I-NEXT:    sh s1, 4(s0)
 ; RV64I-NEXT:    sh s3, 2(s0)
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;
@@ -953,17 +953,17 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-LABEL: dont_fold_urem_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
-; RV32I-NEXT:    sw s1, 36(sp)
-; RV32I-NEXT:    sw s2, 32(sp)
-; RV32I-NEXT:    sw s3, 28(sp)
-; RV32I-NEXT:    sw s4, 24(sp)
-; RV32I-NEXT:    sw s5, 20(sp)
-; RV32I-NEXT:    sw s6, 16(sp)
-; RV32I-NEXT:    sw s7, 12(sp)
-; RV32I-NEXT:    sw s8, 8(sp)
-; RV32I-NEXT:    sw s9, 4(sp)
+; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s9, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw s2, 24(a1)
 ; RV32I-NEXT:    lw s3, 28(a1)
 ; RV32I-NEXT:    lw s4, 16(a1)
@@ -976,21 +976,21 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    addi a2, zero, 1
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __umoddi3
+; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    mv s8, a1
 ; RV32I-NEXT:    addi a2, zero, 654
 ; RV32I-NEXT:    mv a0, s6
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __umoddi3
+; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv s9, a1
 ; RV32I-NEXT:    addi a2, zero, 23
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __umoddi3
+; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    lui a0, 1
@@ -998,7 +998,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    call __umoddi3
+; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    sw a1, 28(s0)
 ; RV32I-NEXT:    sw a0, 24(s0)
 ; RV32I-NEXT:    sw s1, 20(s0)
@@ -1007,34 +1007,34 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    sw s6, 8(s0)
 ; RV32I-NEXT:    sw s8, 4(s0)
 ; RV32I-NEXT:    sw s7, 0(s0)
-; RV32I-NEXT:    lw s9, 4(sp)
-; RV32I-NEXT:    lw s8, 8(sp)
-; RV32I-NEXT:    lw s7, 12(sp)
-; RV32I-NEXT:    lw s6, 16(sp)
-; RV32I-NEXT:    lw s5, 20(sp)
-; RV32I-NEXT:    lw s4, 24(sp)
-; RV32I-NEXT:    lw s3, 28(sp)
-; RV32I-NEXT:    lw s2, 32(sp)
-; RV32I-NEXT:    lw s1, 36(sp)
-; RV32I-NEXT:    lw s0, 40(sp)
-; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    lw s9, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 48
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: dont_fold_urem_i64:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -48
-; RV32IM-NEXT:    sw ra, 44(sp)
-; RV32IM-NEXT:    sw s0, 40(sp)
-; RV32IM-NEXT:    sw s1, 36(sp)
-; RV32IM-NEXT:    sw s2, 32(sp)
-; RV32IM-NEXT:    sw s3, 28(sp)
-; RV32IM-NEXT:    sw s4, 24(sp)
-; RV32IM-NEXT:    sw s5, 20(sp)
-; RV32IM-NEXT:    sw s6, 16(sp)
-; RV32IM-NEXT:    sw s7, 12(sp)
-; RV32IM-NEXT:    sw s8, 8(sp)
-; RV32IM-NEXT:    sw s9, 4(sp)
+; RV32IM-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s2, 32(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s3, 28(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s4, 24(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s5, 20(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s6, 16(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    sw s9, 4(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    lw s2, 24(a1)
 ; RV32IM-NEXT:    lw s3, 28(a1)
 ; RV32IM-NEXT:    lw s4, 16(a1)
@@ -1047,21 +1047,21 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    addi a2, zero, 1
 ; RV32IM-NEXT:    mv a0, a3
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __umoddi3
+; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    mv s7, a0
 ; RV32IM-NEXT:    mv s8, a1
 ; RV32IM-NEXT:    addi a2, zero, 654
 ; RV32IM-NEXT:    mv a0, s6
 ; RV32IM-NEXT:    mv a1, s1
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __umoddi3
+; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    mv s6, a0
 ; RV32IM-NEXT:    mv s9, a1
 ; RV32IM-NEXT:    addi a2, zero, 23
 ; RV32IM-NEXT:    mv a0, s4
 ; RV32IM-NEXT:    mv a1, s5
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __umoddi3
+; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    mv s4, a0
 ; RV32IM-NEXT:    mv s1, a1
 ; RV32IM-NEXT:    lui a0, 1
@@ -1069,7 +1069,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    mv a0, s2
 ; RV32IM-NEXT:    mv a1, s3
 ; RV32IM-NEXT:    mv a3, zero
-; RV32IM-NEXT:    call __umoddi3
+; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    sw a1, 28(s0)
 ; RV32IM-NEXT:    sw a0, 24(s0)
 ; RV32IM-NEXT:    sw s1, 20(s0)
@@ -1078,53 +1078,53 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    sw s6, 8(s0)
 ; RV32IM-NEXT:    sw s8, 4(s0)
 ; RV32IM-NEXT:    sw s7, 0(s0)
-; RV32IM-NEXT:    lw s9, 4(sp)
-; RV32IM-NEXT:    lw s8, 8(sp)
-; RV32IM-NEXT:    lw s7, 12(sp)
-; RV32IM-NEXT:    lw s6, 16(sp)
-; RV32IM-NEXT:    lw s5, 20(sp)
-; RV32IM-NEXT:    lw s4, 24(sp)
-; RV32IM-NEXT:    lw s3, 28(sp)
-; RV32IM-NEXT:    lw s2, 32(sp)
-; RV32IM-NEXT:    lw s1, 36(sp)
-; RV32IM-NEXT:    lw s0, 40(sp)
-; RV32IM-NEXT:    lw ra, 44(sp)
+; RV32IM-NEXT:    lw s9, 4(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s6, 16(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s5, 20(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s4, 24(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s3, 28(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s2, 32(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 48
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: dont_fold_urem_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
-; RV64I-NEXT:    sd ra, 40(sp)
-; RV64I-NEXT:    sd s0, 32(sp)
-; RV64I-NEXT:    sd s1, 24(sp)
-; RV64I-NEXT:    sd s2, 16(sp)
-; RV64I-NEXT:    sd s3, 8(sp)
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    ld s2, 24(a1)
 ; RV64I-NEXT:    ld s1, 16(a1)
 ; RV64I-NEXT:    ld a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    addi a1, zero, 654
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    addi a1, zero, 23
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a1, a0, 1327
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    call __umoddi3
+; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    sd zero, 0(s0)
 ; RV64I-NEXT:    sd a0, 24(s0)
 ; RV64I-NEXT:    sd s1, 16(s0)
 ; RV64I-NEXT:    sd s3, 8(s0)
-; RV64I-NEXT:    ld s3, 8(sp)
-; RV64I-NEXT:    ld s2, 16(sp)
-; RV64I-NEXT:    ld s1, 24(sp)
-; RV64I-NEXT:    ld s0, 32(sp)
-; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 48
 ; RV64I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 9f65576d9786..918b1850b3a3 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -59,8 +59,8 @@ define i32 @va1(i8* %fmt, ...) {
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
 ; ILP32-ILP32F-WITHFP-NEXT:    .cfi_def_cfa_offset 48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    .cfi_offset ra, -36
 ; ILP32-ILP32F-WITHFP-NEXT:    .cfi_offset s0, -40
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
@@ -75,8 +75,8 @@ define i32 @va1(i8* %fmt, ...) {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a1, 4(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a1, s0, 8
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a1, -12(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -119,8 +119,8 @@ define i32 @va1(i8* %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa_offset 96
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset ra, -72
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset s0, -80
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
@@ -136,8 +136,8 @@ define i32 @va1(i8* %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ori a0, a0, 4
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lw a0, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -172,8 +172,8 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-LABEL: va1_va_arg:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    mv a0, a1
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
@@ -185,8 +185,8 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a1, 4(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a1, s0, 8
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a1, -12(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -226,8 +226,8 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
@@ -240,8 +240,8 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, a1, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -258,9 +258,9 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-LABEL: va1_va_arg_alloca:
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    sw s0, 8(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    sw s1, 4(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-FPELIM-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-FPELIM-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    mv s1, a1
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a7, 28(s0)
@@ -276,21 +276,21 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    andi a0, a0, -16
 ; ILP32-ILP32F-FPELIM-NEXT:    sub a0, sp, a0
 ; ILP32-ILP32F-FPELIM-NEXT:    mv sp, a0
-; ILP32-ILP32F-FPELIM-NEXT:    call notdead
+; ILP32-ILP32F-FPELIM-NEXT:    call notdead@plt
 ; ILP32-ILP32F-FPELIM-NEXT:    mv a0, s1
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, s0, -16
-; ILP32-ILP32F-FPELIM-NEXT:    lw s1, 4(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va1_va_arg_alloca:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s1, 4(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    mv s1, a1
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
@@ -306,21 +306,21 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    andi a0, a0, -16
 ; ILP32-ILP32F-WITHFP-NEXT:    sub a0, sp, a0
 ; ILP32-ILP32F-WITHFP-NEXT:    mv sp, a0
-; ILP32-ILP32F-WITHFP-NEXT:    call notdead
+; ILP32-ILP32F-WITHFP-NEXT:    call notdead@plt
 ; ILP32-ILP32F-WITHFP-NEXT:    mv a0, s1
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, s0, -16
-; ILP32-ILP32F-WITHFP-NEXT:    lw s1, 4(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va1_va_arg_alloca:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -48
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw s0, 8(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw s1, 4(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi s0, sp, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv s1, a1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a7, 28(s0)
@@ -336,21 +336,21 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    andi a0, a0, -16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sub a0, sp, a0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv sp, a0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead@plt
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a0, s1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, s0, -16
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw s1, 4(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw s0, 8(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 48
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg_alloca:
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -96
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s0, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    mv s1, a1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 56(s0)
@@ -372,21 +372,21 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    and a0, a0, a1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sub a0, sp, a0
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    mv sp, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, s1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, s0, -32
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg_alloca:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s1, 8(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv s1, a1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
@@ -408,12 +408,12 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    and a0, a0, a1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sub a0, sp, a0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv sp, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, s1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, s0, -32
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 8(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -431,66 +431,66 @@ define void @va1_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-LABEL: va1_caller:
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -16
-; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a4, zero, 2
 ; ILP32-ILP32F-FPELIM-NEXT:    mv a2, zero
 ; ILP32-ILP32F-FPELIM-NEXT:    call va1
-; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va1_caller:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -16
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a4, zero, 2
 ; ILP32-ILP32F-WITHFP-NEXT:    mv a2, zero
 ; ILP32-ILP32F-WITHFP-NEXT:    call va1
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va1_caller:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -16
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a3, 261888
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a4, zero, 2
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a2, zero
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va1
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va1_caller:
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 1023
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 52
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a2, zero, 2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va1
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_caller:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 1023
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 52
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a2, zero, 2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va1
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %1 = call i32 (i8*, ...) @va1(i8* undef, double 1.0, i32 2)
@@ -524,8 +524,8 @@ define i64 @va2(i8 *%fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-LABEL: va2:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 24(s0)
@@ -541,8 +541,8 @@ define i64 @va2(i8 *%fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    lw a0, 0(a1)
 ; ILP32-ILP32F-WITHFP-NEXT:    ori a1, a1, 4
 ; ILP32-ILP32F-WITHFP-NEXT:    lw a1, 0(a1)
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -595,8 +595,8 @@ define i64 @va2(i8 *%fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va2:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
@@ -618,8 +618,8 @@ define i64 @va2(i8 *%fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, a1, -8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    and a0, a0, a1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a0, 0(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -664,8 +664,8 @@ define i64 @va2_va_arg(i8 *%fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-LABEL: va2_va_arg:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 24(s0)
@@ -682,8 +682,8 @@ define i64 @va2_va_arg(i8 *%fmt, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a1, a1, 8
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a1, -12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    lw a1, 0(a2)
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -728,8 +728,8 @@ define i64 @va2_va_arg(i8 *%fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va2_va_arg:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
@@ -742,8 +742,8 @@ define i64 @va2_va_arg(i8 *%fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, a1, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -759,61 +759,61 @@ define void @va2_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-LABEL: va2_caller:
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -16
-; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-FPELIM-NEXT:    mv a2, zero
 ; ILP32-ILP32F-FPELIM-NEXT:    call va2
-; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va2_caller:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -16
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a3, 261888
 ; ILP32-ILP32F-WITHFP-NEXT:    mv a2, zero
 ; ILP32-ILP32F-WITHFP-NEXT:    call va2
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va2_caller:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -16
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a3, 261888
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a2, zero
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va2
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va2_caller:
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 1023
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 52
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va2
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va2_caller:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 1023
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 52
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va2
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
  %1 = call i64 (i8*, ...) @va2(i8* undef, double 1.000000e+00)
@@ -849,8 +849,8 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-LABEL: va3:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 20(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 16(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 20(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 16(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 24
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 20(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a5, 12(s0)
@@ -868,8 +868,8 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sltu a1, a0, a1
 ; ILP32-ILP32F-WITHFP-NEXT:    add a2, a2, a4
 ; ILP32-ILP32F-WITHFP-NEXT:    add a1, a2, a1
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 16(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 20(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -924,8 +924,8 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va3:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -80
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 40(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 32(s0)
@@ -947,8 +947,8 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    and a0, a0, a2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a0, 0(a0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    add a0, a1, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 80
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -996,8 +996,8 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-LABEL: va3_va_arg:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 20(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 16(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 20(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 16(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 24
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 20(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 16(s0)
@@ -1016,8 +1016,8 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sltu a1, a0, a1
 ; ILP32-ILP32F-WITHFP-NEXT:    add a2, a2, a3
 ; ILP32-ILP32F-WITHFP-NEXT:    add a1, a2, a1
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 16(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 20(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -1063,8 +1063,8 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va3_va_arg:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -80
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 40(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 32(s0)
@@ -1076,8 +1076,8 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ori a3, a0, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    add a0, a1, a2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 80
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -1094,22 +1094,22 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-LABEL: va3_caller:
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -16
-; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 2
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a1, zero, 1111
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a5, 262144
 ; ILP32-ILP32F-FPELIM-NEXT:    mv a2, zero
 ; ILP32-ILP32F-FPELIM-NEXT:    mv a4, zero
 ; ILP32-ILP32F-FPELIM-NEXT:    call va3
-; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va3_caller:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -16
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 2
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a1, zero, 1111
@@ -1117,51 +1117,51 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    mv a2, zero
 ; ILP32-ILP32F-WITHFP-NEXT:    mv a4, zero
 ; ILP32-ILP32F-WITHFP-NEXT:    call va3
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va3_caller:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -16
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, zero, 2
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a1, zero, 1111
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a5, 262144
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a2, zero
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a4, zero
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va3
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va3_caller:
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a0, 62
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, zero, 1111
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va3
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va3_caller:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a0, 62
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, zero, 1111
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va3
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
  %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, double 2.000000e+00)
@@ -1174,8 +1174,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-LABEL: va4_va_copy:
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-FPELIM-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    mv s0, a1
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a7, 44(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a6, 40(sp)
@@ -1187,7 +1187,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, sp, 24
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 4(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 0(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    call notdead
+; ILP32-ILP32F-FPELIM-NEXT:    call notdead@plt
 ; ILP32-ILP32F-FPELIM-NEXT:    lw a0, 4(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, a0, 3
 ; ILP32-ILP32F-FPELIM-NEXT:    andi a0, a0, -4
@@ -1207,17 +1207,17 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    add a1, a1, s0
 ; ILP32-ILP32F-FPELIM-NEXT:    add a1, a1, a2
 ; ILP32-ILP32F-FPELIM-NEXT:    add a0, a1, a0
-; ILP32-ILP32F-FPELIM-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va4_va_copy:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -64
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 28(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 24(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s1, 20(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 32
 ; ILP32-ILP32F-WITHFP-NEXT:    mv s1, a1
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
@@ -1230,7 +1230,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, s0, 8
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, -16(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, -20(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    call notdead
+; ILP32-ILP32F-WITHFP-NEXT:    call notdead@plt
 ; ILP32-ILP32F-WITHFP-NEXT:    lw a0, -16(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, a0, 3
 ; ILP32-ILP32F-WITHFP-NEXT:    andi a0, a0, -4
@@ -1250,17 +1250,17 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    add a1, a1, s1
 ; ILP32-ILP32F-WITHFP-NEXT:    add a1, a1, a2
 ; ILP32-ILP32F-WITHFP-NEXT:    add a0, a1, a0
-; ILP32-ILP32F-WITHFP-NEXT:    lw s1, 20(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 24(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 28(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 64
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va4_va_copy:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -48
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw s0, 8(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv s0, a1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a7, 44(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a6, 40(sp)
@@ -1272,7 +1272,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, sp, 24
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 4(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 0(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call notdead@plt
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw a0, 4(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, a0, 3
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    andi a0, a0, -4
@@ -1292,16 +1292,16 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    add a1, a1, s0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    add a1, a1, a2
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    add a0, a1, a0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw s0, 8(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 48
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va4_va_copy:
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -96
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    mv s0, a1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 88(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 80(sp)
@@ -1314,7 +1314,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 8
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call notdead@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 3
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    andi a0, a0, -4
@@ -1334,17 +1334,17 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    add a1, a1, s0
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    add a1, a1, a2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addw a0, a1, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va4_va_copy:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -112
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 40(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 32(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s1, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 48
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv s1, a1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
@@ -1358,7 +1358,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -32(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call notdead@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a0, -32(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 3
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    andi a0, a0, -4
@@ -1378,9 +1378,9 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    add a1, a1, s1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    add a1, a1, a2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addw a0, a1, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 40(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 112
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %vargs = alloca i8*, align 4
@@ -1413,7 +1413,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-LABEL: va5_aligned_stack_caller:
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -64
-; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 60(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 17
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 24(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 16
@@ -1448,16 +1448,16 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a4, zero, 13
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a7, zero, 4
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a5, 32(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    call va5_aligned_stack_callee
-; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 60(sp)
+; ILP32-ILP32F-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
+; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 64
 ; ILP32-ILP32F-FPELIM-NEXT:    ret
 ;
 ; ILP32-ILP32F-WITHFP-LABEL: va5_aligned_stack_caller:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -64
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 60(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 56(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 64
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 17
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, 24(sp)
@@ -1493,16 +1493,16 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a4, zero, 13
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a7, zero, 4
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a5, -32(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    call va5_aligned_stack_callee
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 56(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 60(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    call va5_aligned_stack_callee@plt
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 64
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va5_aligned_stack_caller:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -64
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 60(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a0, 262236
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, a0, 655
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 12(sp)
@@ -1537,15 +1537,15 @@ define void @va5_aligned_stack_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a4, zero, 13
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a7, zero, 4
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a5, 32(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va5_aligned_stack_callee
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 60(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 64
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va5_aligned_stack_caller:
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -48
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 17
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 24(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 16
@@ -1586,16 +1586,16 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a5, zero, 13
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a7, zero, 14
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd t0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    call va5_aligned_stack_callee
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 40(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 48
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va5_aligned_stack_caller:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -48
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 40(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 32(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 48
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 17
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 24(sp)
@@ -1637,9 +1637,9 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a5, zero, 13
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a7, zero, 14
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd t0, 0(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    call va5_aligned_stack_callee
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 40(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    call va5_aligned_stack_callee@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 48
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %1 = call i32 (i32, ...) @va5_aligned_stack_callee(i32 1, i32 11,
@@ -1672,8 +1672,8 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; ILP32-ILP32F-WITHFP-LABEL: va6_no_fixed_args:
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -48
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a7, 28(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a6, 24(s0)
@@ -1685,8 +1685,8 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, 0(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a1, s0, 4
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a1, -12(s0)
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 48
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -1726,8 +1726,8 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-LABEL: va6_no_fixed_args:
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -96
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
@@ -1740,8 +1740,8 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a1, s0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ori a1, a1, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca i8*, align 4
@@ -1804,8 +1804,8 @@ define i32 @va_large_stack(i8* %fmt, ...) {
 ; ILP32-ILP32F-WITHFP:       # %bb.0:
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, -2032
 ; ILP32-ILP32F-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
-; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 1996(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 1992(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 1996(sp) # 4-byte Folded Spill
+; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 1992(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    .cfi_offset ra, -36
 ; ILP32-ILP32F-WITHFP-NEXT:    .cfi_offset s0, -40
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 2000
@@ -1829,8 +1829,8 @@ define i32 @va_large_stack(i8* %fmt, ...) {
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a1, 24414
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a1, a1, -1728
 ; ILP32-ILP32F-WITHFP-NEXT:    add sp, sp, a1
-; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 1992(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 1996(sp)
+; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 1992(sp) # 4-byte Folded Reload
+; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 1996(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    addi sp, sp, 2032
 ; ILP32-ILP32F-WITHFP-NEXT:    ret
 ;
@@ -1932,8 +1932,8 @@ define i32 @va_large_stack(i8* %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP:       # %bb.0:
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, -2032
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 1960(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 1952(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 1960(sp) # 8-byte Folded Spill
+; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 1952(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset ra, -72
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    .cfi_offset s0, -80
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 1968
@@ -1958,8 +1958,8 @@ define i32 @va_large_stack(i8* %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a1, 24414
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a1, a1, -1680
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    add sp, sp, a1
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 1952(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 1960(sp)
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 1952(sp) # 8-byte Folded Reload
+; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 1960(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 2032
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %large = alloca [ 100000000 x i8 ]