[clang] [llvm] [RISCV] Inline Assembly Support for GPR Pairs ('Pr') (PR #112983)

Sam Elliott via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 24 06:57:19 PDT 2024


https://github.com/lenary updated https://github.com/llvm/llvm-project/pull/112983

>From 7729c22056472e210f531158268474ee34c6c309 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Tue, 22 Oct 2024 12:36:47 -0700
Subject: [PATCH 1/3] [RISCV][NFC] Split branch-relaxation.ll

The existing branch-relaxation.ll test interacts badly with my later
changes, but only because of the test cases whose comments tell you to
ignore them. Instead, split it into two separate files, each with correct
and expected inputs, for testing.
---
 .../CodeGen/RISCV/branch-relaxation-rv32.ll   | 1010 ++++++
 .../CodeGen/RISCV/branch-relaxation-rv64.ll   | 1013 ++++++
 llvm/test/CodeGen/RISCV/branch-relaxation.ll  | 3226 -----------------
 3 files changed, 2023 insertions(+), 3226 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation.ll

diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
new file mode 100644
index 00000000000000..69aaa47e7482aa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
@@ -0,0 +1,1010 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -filetype=obj < %s \
+; RUN:   -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs \
+; RUN:   -filetype=obj < %s -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB0_1
+; CHECK-NEXT:    j .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 4096
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB0_2: # %tail
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %tail
+
+iftrue:
+  call void asm sideeffect ".space 4096", ""()
+  br label %tail
+
+tail:
+  ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB1_1
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    jump .LBB1_2, a0
+; CHECK-NEXT:  .LBB1_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %jmp
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB1_3: # %tail
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %jmp
+
+jmp:
+  call void asm sideeffect "", ""()
+  br label %tail
+
+iftrue:
+  call void asm sideeffect "", ""()
+  br label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %tail
+
+tail:
+  ret i32 1
+}
+
+define void @relax_jal_spill_32() {
+; CHECK-LABEL: relax_jal_spill_32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB2_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB2_4, s11
+; CHECK-NEXT:  .LBB2_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB2_2
+; CHECK-NEXT:  .LBB2_4: # %branch_2
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB2_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i32 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_32_adjust_spill_slot() {
+  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+  ; is out the range of 12-bit signed integer, check whether the spill slot is
+  ; adjusted to close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_32_adjust_spill_slot:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -2032
+; CHECK-NEXT:    .cfi_def_cfa_offset 2032
+; CHECK-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 2012(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 2008(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 2004(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 2000(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 1996(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 1992(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 1988(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 1984(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 1980(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    addi s0, sp, 2032
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    lui a0, 2
+; CHECK-NEXT:    addi a0, a0, -2032
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    srli a0, sp, 12
+; CHECK-NEXT:    slli sp, a0, 12
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB3_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB3_4, s11
+; CHECK-NEXT:  .LBB3_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB3_2
+; CHECK-NEXT:  .LBB3_4: # %branch_2
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB3_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    addi sp, s0, -2032
+; CHECK-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 2012(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 2008(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 2004(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 2000(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 1996(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 1992(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 2032
+; CHECK-NEXT:    ret
+  %stack_obj = alloca i32, align 4096
+
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i32 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_32_restore_block_correspondence() {
+; CHECK-LABEL: relax_jal_spill_32_restore_block_correspondence:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    bne t5, t6, .LBB4_2
+; CHECK-NEXT:    j .LBB4_1
+; CHECK-NEXT:  .LBB4_8: # %dest_1
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB4_1: # %dest_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB4_3
+; CHECK-NEXT:  .LBB4_2: # %cond_2
+; CHECK-NEXT:    bne t3, t4, .LBB4_5
+; CHECK-NEXT:  .LBB4_3: # %dest_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB4_4: # %dest_3
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_5: # %cond_3
+; CHECK-NEXT:    beq t1, t2, .LBB4_4
+; CHECK-NEXT:  # %bb.6: # %space
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  # %bb.7: # %space
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB4_8, s11
+entry:
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  br label %cond_1
+
+cond_1:
+  %cmp1 = icmp eq i32 %t5, %t6
+  br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+  %cmp2 = icmp eq i32 %t3, %t4
+  br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+  %cmp3 = icmp eq i32 %t1, %t2
+  br i1 %cmp3, label %dest_3, label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %dest_1
+
+dest_1:
+  call void asm sideeffect "# dest 1", ""()
+  br label %dest_2
+
+dest_2:
+  call void asm sideeffect "# dest 2", ""()
+  br label %dest_3
+
+dest_3:
+  call void asm sideeffect "# dest 3", ""()
+  br label %tail
+
+tail:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
new file mode 100644
index 00000000000000..90ef390ab68873
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
@@ -0,0 +1,1013 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -filetype=obj < %s \
+; RUN:   -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs \
+; RUN:   -filetype=obj < %s -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB0_1
+; CHECK-NEXT:    j .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 4096
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB0_2: # %tail
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %tail
+
+iftrue:
+  call void asm sideeffect ".space 4096", ""()
+  br label %tail
+
+tail:
+  ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB1_1
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    jump .LBB1_2, a0
+; CHECK-NEXT:  .LBB1_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %jmp
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB1_3: # %tail
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %jmp
+
+jmp:
+  call void asm sideeffect "", ""()
+  br label %tail
+
+iftrue:
+  call void asm sideeffect "", ""()
+  br label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %tail
+
+tail:
+  ret i32 1
+}
+
+
+define void @relax_jal_spill_64() {
+;
+; CHECK-LABEL: relax_jal_spill_64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -112
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB2_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB2_4, s11
+; CHECK-NEXT:  .LBB2_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB2_2
+; CHECK-NEXT:  .LBB2_4: # %branch_2
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB2_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    ret
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i64 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_64_adjust_spill_slot() {
+;
+  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+  ; is out of the range of a 12-bit signed integer, check whether the spill
+  ; slot is adjusted to be close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_64_adjust_spill_slot:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -2032
+; CHECK-NEXT:    .cfi_def_cfa_offset 2032
+; CHECK-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 1984(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 1976(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 1968(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 1960(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 1952(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 1944(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 1936(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 1928(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    addi s0, sp, 2032
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    lui a0, 2
+; CHECK-NEXT:    addiw a0, a0, -2032
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    srli a0, sp, 12
+; CHECK-NEXT:    slli sp, a0, 12
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB3_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB3_4, s11
+; CHECK-NEXT:  .LBB3_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB3_2
+; CHECK-NEXT:  .LBB3_4: # %branch_2
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB3_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    addi sp, s0, -2032
+; CHECK-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 1984(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 1976(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 1968(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 1960(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 1952(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 2032
+; CHECK-NEXT:    ret
+  %stack_obj = alloca i64, align 4096
+
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i64 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_64_restore_block_correspondence() {
+;
+; CHECK-LABEL: relax_jal_spill_64_restore_block_correspondence:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -112
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    bne t5, t6, .LBB4_2
+; CHECK-NEXT:    j .LBB4_1
+; CHECK-NEXT:  .LBB4_8: # %dest_1
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB4_1: # %dest_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB4_3
+; CHECK-NEXT:  .LBB4_2: # %cond_2
+; CHECK-NEXT:    bne t3, t4, .LBB4_5
+; CHECK-NEXT:  .LBB4_3: # %dest_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB4_4: # %dest_3
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_5: # %cond_3
+; CHECK-NEXT:    beq t1, t2, .LBB4_4
+; CHECK-NEXT:  # %bb.6: # %space
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  # %bb.7: # %space
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB4_8, s11
+entry:
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  br label %cond_1
+
+cond_1:
+  %cmp1 = icmp eq i64 %t5, %t6
+  br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+  %cmp2 = icmp eq i64 %t3, %t4
+  br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+  %cmp3 = icmp eq i64 %t1, %t2
+  br i1 %cmp3, label %dest_3, label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %dest_1
+
+dest_1:
+  call void asm sideeffect "# dest 1", ""()
+  br label %dest_2
+
+dest_2:
+  call void asm sideeffect "# dest 2", ""()
+  br label %dest_3
+
+dest_3:
+  call void asm sideeffect "# dest 3", ""()
+  br label %tail
+
+tail:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
deleted file mode 100644
index ec77d54da116d3..00000000000000
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ /dev/null
@@ -1,3226 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -filetype=obj < %s \
-; RUN:   -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs \
-; RUN:   -filetype=obj < %s -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
-; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs -filetype=obj < %s \
-; RUN:   -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs \
-; RUN:   -filetype=obj < %s -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-
-define void @relax_bcc(i1 %a) nounwind {
-; CHECK-LABEL: relax_bcc:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 1
-; CHECK-NEXT:    bnez a0, .LBB0_1
-; CHECK-NEXT:    j .LBB0_2
-; CHECK-NEXT:  .LBB0_1: # %iftrue
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    .zero 4096
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:  .LBB0_2: # %tail
-; CHECK-NEXT:    ret
-  br i1 %a, label %iftrue, label %tail
-
-iftrue:
-  call void asm sideeffect ".space 4096", ""()
-  br label %tail
-
-tail:
-  ret void
-}
-
-define i32 @relax_jal(i1 %a) nounwind {
-; CHECK-LABEL: relax_jal:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    andi a0, a0, 1
-; CHECK-NEXT:    bnez a0, .LBB1_1
-; CHECK-NEXT:  # %bb.4:
-; CHECK-NEXT:    jump .LBB1_2, a0
-; CHECK-NEXT:  .LBB1_1: # %iftrue
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    .zero 1048576
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    j .LBB1_3
-; CHECK-NEXT:  .LBB1_2: # %jmp
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:  .LBB1_3: # %tail
-; CHECK-NEXT:    li a0, 1
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  br i1 %a, label %iftrue, label %jmp
-
-jmp:
-  call void asm sideeffect "", ""()
-  br label %tail
-
-iftrue:
-  call void asm sideeffect "", ""()
-  br label %space
-
-space:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %tail
-
-tail:
-  ret i32 1
-}
-
-; For functions whose names contain 32, only the CHECK-RV32 lines are
-; meaningful, and for functions whose names contain 64, only the CHECK-RV64
-; lines are meaningful.
-
-define void @relax_jal_spill_32() {
-; CHECK-RV32-LABEL: relax_jal_spill_32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -64
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    beq t5, t6, .LBB2_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    sw s11, 0(sp)
-; CHECK-RV32-NEXT:    jump .LBB2_4, s11
-; CHECK-RV32-NEXT:  .LBB2_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    j .LBB2_2
-; CHECK-RV32-NEXT:  .LBB2_4: # %branch_2
-; CHECK-RV32-NEXT:    lw s11, 0(sp)
-; CHECK-RV32-NEXT:  .LBB2_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 64
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -128
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 128
-; CHECK-RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 48(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 40(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 32(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 24(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t5, t5
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t6, t6
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB2_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    jump .LBB2_2, t5
-; CHECK-RV64-NEXT:  .LBB2_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  .LBB2_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 48(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 128
-; CHECK-RV64-NEXT:    ret
-
-  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i32 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_32_adjust_spill_slot() {
-; CHECK-RV32-LABEL: relax_jal_spill_32_adjust_spill_slot:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 2012(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 2008(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 2004(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 2000(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 1996(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 1992(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 1988(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 1984(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 1980(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    addi s0, sp, 2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV32-NEXT:    lui a0, 2
-; CHECK-RV32-NEXT:    addi a0, a0, -2032
-; CHECK-RV32-NEXT:    sub sp, sp, a0
-; CHECK-RV32-NEXT:    srli a0, sp, 12
-; CHECK-RV32-NEXT:    slli sp, a0, 12
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    beq t5, t6, .LBB3_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    sw s11, 0(sp)
-; CHECK-RV32-NEXT:    jump .LBB3_4, s11
-; CHECK-RV32-NEXT:  .LBB3_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    j .LBB3_2
-; CHECK-RV32-NEXT:  .LBB3_4: # %branch_2
-; CHECK-RV32-NEXT:    lw s11, 0(sp)
-; CHECK-RV32-NEXT:  .LBB3_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    addi sp, s0, -2032
-; CHECK-RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 2012(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 2008(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 2004(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 2000(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 1996(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 1992(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 2032
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_32_adjust_spill_slot:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 1984(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 1976(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 1968(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 1960(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 1952(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 1944(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 1936(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 1928(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    addi s0, sp, 2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV64-NEXT:    lui a0, 2
-; CHECK-RV64-NEXT:    addiw a0, a0, -2032
-; CHECK-RV64-NEXT:    sub sp, sp, a0
-; CHECK-RV64-NEXT:    srli a0, sp, 12
-; CHECK-RV64-NEXT:    slli sp, a0, 12
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t0, 0(sp)
-; CHECK-RV64-NEXT:    lui t0, 1
-; CHECK-RV64-NEXT:    add t0, sp, t0
-; CHECK-RV64-NEXT:    sd t5, -8(t0) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t5, t5
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui t0, 1
-; CHECK-RV64-NEXT:    add t0, sp, t0
-; CHECK-RV64-NEXT:    sd t6, -16(t0) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    ld t0, 0(sp)
-; CHECK-RV64-NEXT:    sext.w t6, t6
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB3_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    jump .LBB3_2, t5
-; CHECK-RV64-NEXT:  .LBB3_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  .LBB3_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui a0, 1
-; CHECK-RV64-NEXT:    add a0, sp, a0
-; CHECK-RV64-NEXT:    ld t5, -8(a0) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui a0, 1
-; CHECK-RV64-NEXT:    add a0, sp, a0
-; CHECK-RV64-NEXT:    ld t6, -16(a0) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    addi sp, s0, -2032
-; CHECK-RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 1984(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 1976(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 1968(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 1960(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 1952(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 2032
-; CHECK-RV64-NEXT:    ret
-
-  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
-  ; is out the range of 12-bit signed integer, check whether the spill slot is
-  ; adjusted to close to the stack base register.
-  %stack_obj = alloca i32, align 4096
-
-  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i32 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_64() {
-; CHECK-RV32-LABEL: relax_jal_spill_64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -272
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 272
-; CHECK-RV32-NEXT:    sw ra, 268(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 260(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 256(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 252(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 248(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 244(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 240(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 236(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 232(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 228(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 224(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 220(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t0, 216(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t1, 212(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t1, 208(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t2, 204(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t2, 200(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t3, 196(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s0, 192(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 188(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s1, 184(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 180(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a1, 176(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a1, 172(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a2, 168(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a2, 164(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a3, 160(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a3, 156(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a4, 152(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a4, 148(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a5, 144(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a5, 140(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a6, 136(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a6, 132(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a7, 128(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a7, 124(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t0, 120(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s2, 116(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 112(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s3, 108(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 104(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s4, 100(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 96(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s5, 92(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 88(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s6, 84(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 80(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s7, 76(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 72(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s8, 68(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 64(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s9, 60(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 56(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s10, 52(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 48(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s11, 44(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t3, 40(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t4, 36(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t4, 32(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t5, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    mv a1, t6
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a1, 24(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a1, a1, s0
-; CHECK-RV32-NEXT:    sw t6, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a2, t5, t6
-; CHECK-RV32-NEXT:    or a1, a2, a1
-; CHECK-RV32-NEXT:    beqz a1, .LBB4_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    jump .LBB4_2, a1
-; CHECK-RV32-NEXT:  .LBB4_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  .LBB4_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t0, 216(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t1, 212(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t1, 208(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t2, 204(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t2, 200(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t3, 196(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s0, 192(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 188(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s1, 184(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 180(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a1, 176(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a1, 172(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a2, 168(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a2, 164(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a3, 160(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a3, 156(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a4, 152(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a4, 148(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a5, 144(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a5, 140(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a6, 136(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a6, 132(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a7, 128(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a7, 124(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t0, 120(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s2, 116(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 112(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s3, 108(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 104(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s4, 100(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 96(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s5, 92(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 88(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s6, 84(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 80(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s7, 76(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 72(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s8, 68(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 64(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s9, 60(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 56(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s10, 52(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 48(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s11, 44(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t3, 40(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t4, 36(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t4, 32(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t5, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t5, 16(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t6, 24(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t6, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 260(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 256(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 252(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 248(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 244(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 240(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 236(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 232(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 228(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 224(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 220(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 272
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -112
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 112
-; CHECK-RV64-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB4_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    sd s11, 0(sp)
-; CHECK-RV64-NEXT:    jump .LBB4_4, s11
-; CHECK-RV64-NEXT:  .LBB4_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    j .LBB4_2
-; CHECK-RV64-NEXT:  .LBB4_4: # %branch_2
-; CHECK-RV64-NEXT:    ld s11, 0(sp)
-; CHECK-RV64-NEXT:  .LBB4_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 112
-; CHECK-RV64-NEXT:    ret
-
-  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i64 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_64_adjust_spill_slot() {
-; CHECK-RV32-LABEL: relax_jal_spill_64_adjust_spill_slot:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 2012(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 2008(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 2004(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 2000(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 1996(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 1992(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 1988(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 1984(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 1980(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    addi s0, sp, 2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV32-NEXT:    lui a0, 2
-; CHECK-RV32-NEXT:    addi a0, a0, -2032
-; CHECK-RV32-NEXT:    sub sp, sp, a0
-; CHECK-RV32-NEXT:    srli a0, sp, 12
-; CHECK-RV32-NEXT:    slli sp, a0, 12
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t0, -4(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t1, -8(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t1, -12(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t2, -16(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t2, -20(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t3, -24(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s0, -28(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s1, -32(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s1, -36(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s2, -40(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw a1, -44(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a3, 1
-; CHECK-RV32-NEXT:    add a3, sp, a3
-; CHECK-RV32-NEXT:    sw a1, -48(a3) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a2, -52(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a2, -56(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a3, -60(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a3, -64(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a4, -68(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a4, -72(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a5, -76(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a5, -80(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a6, -84(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a6, -88(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a7, -92(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a7, -96(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t0, -100(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s2, -104(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s3, -108(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s3, -112(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s4, -116(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s4, -120(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s5, -124(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s5, -128(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s6, -132(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s6, -136(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s7, -140(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s7, -144(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s8, -148(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s8, -152(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s9, -156(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s9, -160(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s10, -164(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s10, -168(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s11, -172(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s11, -176(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t3, -180(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t4, -184(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t4, -188(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t5, -192(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    mv a1, t6
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw s0, -208(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw a1, -196(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a1, a1, s0
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw t6, -200(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw t5, -204(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a2, t5, t6
-; CHECK-RV32-NEXT:    or a1, a2, a1
-; CHECK-RV32-NEXT:    beqz a1, .LBB5_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    jump .LBB5_2, a1
-; CHECK-RV32-NEXT:  .LBB5_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  .LBB5_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t0, -4(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t1, -8(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t1, -12(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t2, -16(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t2, -20(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t3, -24(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s0, -28(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s1, -32(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s1, -36(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s2, -40(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw a1, -44(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a1, -48(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a2, -52(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a2, -56(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a3, -60(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a3, -64(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a4, -68(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a4, -72(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a5, -76(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a5, -80(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a6, -84(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a6, -88(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a7, -92(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a7, -96(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t0, -100(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s2, -104(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s3, -108(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s3, -112(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s4, -116(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s4, -120(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s5, -124(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s5, -128(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s6, -132(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s6, -136(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s7, -140(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s7, -144(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s8, -148(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s8, -152(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s9, -156(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s9, -160(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s10, -164(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s10, -168(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s11, -172(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s11, -176(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t3, -180(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t4, -184(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t4, -188(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t5, -192(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t5, -204(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t6, -196(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s0, -208(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t6, -200(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    addi sp, s0, -2032
-; CHECK-RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 2012(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 2008(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 2004(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 2000(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 1996(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 1992(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 2032
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_64_adjust_spill_slot:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 1984(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 1976(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 1968(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 1960(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 1952(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 1944(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 1936(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 1928(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    addi s0, sp, 2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV64-NEXT:    lui a0, 2
-; CHECK-RV64-NEXT:    addiw a0, a0, -2032
-; CHECK-RV64-NEXT:    sub sp, sp, a0
-; CHECK-RV64-NEXT:    srli a0, sp, 12
-; CHECK-RV64-NEXT:    slli sp, a0, 12
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB5_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    sd s11, 0(sp)
-; CHECK-RV64-NEXT:    jump .LBB5_4, s11
-; CHECK-RV64-NEXT:  .LBB5_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    j .LBB5_2
-; CHECK-RV64-NEXT:  .LBB5_4: # %branch_2
-; CHECK-RV64-NEXT:    ld s11, 0(sp)
-; CHECK-RV64-NEXT:  .LBB5_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    addi sp, s0, -2032
-; CHECK-RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 1984(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 1976(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 1968(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 1960(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 1952(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 2032
-; CHECK-RV64-NEXT:    ret
-
-  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
-  ; is out the range of 12-bit signed integer, check whether the spill slot is
-  ; adjusted to close to the stack base register.
-  %stack_obj = alloca i64, align 4096
-
-  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i64 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_32_restore_block_correspondence() {
-; CHECK-RV32-LABEL: relax_jal_spill_32_restore_block_correspondence:
-; CHECK-RV32:       # %bb.0: # %entry
-; CHECK-RV32-NEXT:    addi sp, sp, -64
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    bne t5, t6, .LBB6_2
-; CHECK-RV32-NEXT:    j .LBB6_1
-; CHECK-RV32-NEXT:  .LBB6_8: # %dest_1
-; CHECK-RV32-NEXT:    lw s11, 0(sp)
-; CHECK-RV32-NEXT:  .LBB6_1: # %dest_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # dest 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    j .LBB6_3
-; CHECK-RV32-NEXT:  .LBB6_2: # %cond_2
-; CHECK-RV32-NEXT:    bne t3, t4, .LBB6_5
-; CHECK-RV32-NEXT:  .LBB6_3: # %dest_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # dest 2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  .LBB6_4: # %dest_3
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # dest 3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 64
-; CHECK-RV32-NEXT:    ret
-; CHECK-RV32-NEXT:  .LBB6_5: # %cond_3
-; CHECK-RV32-NEXT:    beq t1, t2, .LBB6_4
-; CHECK-RV32-NEXT:  # %bb.6: # %space
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  # %bb.7: # %space
-; CHECK-RV32-NEXT:    sw s11, 0(sp)
-; CHECK-RV32-NEXT:    jump .LBB6_8, s11
-;
-; CHECK-RV64-LABEL: relax_jal_spill_32_restore_block_correspondence:
-; CHECK-RV64:       # %bb.0: # %entry
-; CHECK-RV64-NEXT:    addi sp, sp, -128
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 128
-; CHECK-RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 48(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 40(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 32(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 24(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t6, t6
-; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t5, t5
-; CHECK-RV64-NEXT:    bne t5, t6, .LBB6_2
-; CHECK-RV64-NEXT:  .LBB6_1: # %dest_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # dest 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    j .LBB6_3
-; CHECK-RV64-NEXT:  .LBB6_2: # %cond_2
-; CHECK-RV64-NEXT:    sext.w t5, t4
-; CHECK-RV64-NEXT:    sext.w t6, t3
-; CHECK-RV64-NEXT:    bne t6, t5, .LBB6_5
-; CHECK-RV64-NEXT:  .LBB6_3: # %dest_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # dest 2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  .LBB6_4: # %dest_3
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # dest 3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 48(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 128
-; CHECK-RV64-NEXT:    ret
-; CHECK-RV64-NEXT:  .LBB6_5: # %cond_3
-; CHECK-RV64-NEXT:    sext.w t5, t2
-; CHECK-RV64-NEXT:    sext.w t6, t1
-; CHECK-RV64-NEXT:    beq t6, t5, .LBB6_4
-; CHECK-RV64-NEXT:  # %bb.6: # %space
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  # %bb.7: # %space
-; CHECK-RV64-NEXT:    jump .LBB6_1, t5
-entry:
-  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  br label %cond_1
-
-cond_1:
-  %cmp1 = icmp eq i32 %t5, %t6
-  br i1 %cmp1, label %dest_1, label %cond_2
-
-cond_2:
-  %cmp2 = icmp eq i32 %t3, %t4
-  br i1 %cmp2, label %dest_2, label %cond_3
-
-cond_3:
-  %cmp3 = icmp eq i32 %t1, %t2
-  br i1 %cmp3, label %dest_3, label %space
-
-space:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %dest_1
-
-dest_1:
-  call void asm sideeffect "# dest 1", ""()
-  br label %dest_2
-
-dest_2:
-  call void asm sideeffect "# dest 2", ""()
-  br label %dest_3
-
-dest_3:
-  call void asm sideeffect "# dest 3", ""()
-  br label %tail
-
-tail:
-  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
-
-  ret void
-}

>From 483abe4679b3336af07baabb5152f66faef9ff22 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Tue, 22 Oct 2024 12:37:48 -0700
Subject: [PATCH 2/3] [RISCV] GPR Pairs for Inline Asm using `Pr`

This patch adds support for getting even-odd general purpose register
pairs into and out of inline assembly using the `Pr` constraint as
proposed in riscv-non-isa/riscv-c-api-doc#92

There are a few different pieces to this patch, each of which needs its
own explanation.

Target-Independent Changes:
- This adds two new Machine Value Types (MVTs), which represent pairs for
  each xlen. Two are needed because MVTs usually have a fixed length. This
  change unfortunately increases the size of SelectionDAG tables indexed
  by MVT by a small percentage.

- When an inline assembly block returns multiple values, it returns them
  in a struct, rather than as a single value. This fixes TargetLowering
  so that `getAsmOperandValueType` is called on the types in that
  struct, so that targets have the opportunity to propose their own MVT
  for an inline assembly operand where this wouldn't match conventional
  arguments/return values. This matches what happens when a single value
  is returned.

RISC-V Changes:
- Renames the Register Class used for f64 values on rv32i_zdinx from
  `GPRPair*` to `GPRF64Pair*`. These register classes are kept broadly
  unmodified, as their primary value type is used for type inference
  over selection patterns. This rename affects quite a lot of files. I
  reordered the definitions in RISCVRegisterInfo.td and added headings
  to make it easier to browse.

- Adds new `GPRPair*` register classes which will be used for `Pr`
  constraints and for instructions that need an even-odd GPR pair. This
  new type is used for `amocas.d.*`(rv32) and `amocas.q.*`(rv64) in
  Zacas, instead of the `GPRF64Pair` class being used before.

- Marks the new `GPRPair` class as legal for holding a
  `MVT::riscv_i<xlen>_pair`. Two new RISCVISD node types are added for
  creating and destructing a pair - `BuildGPRPair` and `SplitGPRPair`,
  and are introduced when bitcasting to/from the pair type and the
  `i<2*xlen>` type.

- This adds an override for `getNumRegisters` to ensure that `i<2*xlen>`
  values, when going to/from inline assembly, only allocate one (pair)
  register (they would otherwise allocate two).

- Ensures that the DAGCombiner doesn't merge the `bitcast` between
  `i<2*xlen>` types and the pair type into a load/store, as we want to
  legalise these 2*xlen-wide loads/stores as before - by splitting them
  into two xlen-wide loads/stores, which will happen with `i<2*xlen>`
  types.

- Ensures that Clang understands that `Pr` is a valid inline assembly
  constraint.
---
 clang/lib/Basic/Targets/RISCV.cpp             |  11 +-
 clang/test/CodeGen/RISCV/riscv-inline-asm.c   |  13 ++
 llvm/include/llvm/CodeGen/ValueTypes.td       |  25 ++-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |   3 +-
 llvm/lib/CodeGen/ValueTypes.cpp               |   6 +
 .../Target/RISCV/AsmParser/RISCVAsmParser.cpp |  22 +-
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   |  31 ++-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  75 ++++++-
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |  24 +++
 llvm/lib/Target/RISCV/RISCVInstrInfoD.td      |  12 +-
 llvm/lib/Target/RISCV/RISCVRegisterInfo.td    | 195 +++++++++++-------
 llvm/lib/Target/RISCV/RISCVSubtarget.h        |   4 +
 .../CodeGen/RISCV/rv32-inline-asm-pairs.ll    |  73 +++++++
 .../CodeGen/RISCV/rv64-inline-asm-pairs.ll    |  73 +++++++
 14 files changed, 455 insertions(+), 112 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll

diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index eaaba7642bd7b2..07bf002ed73928 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -108,6 +108,14 @@ bool RISCVTargetInfo::validateAsmConstraint(
       return true;
     }
     return false;
+  case 'P':
+    // An even-odd register pair - GPR
+    if (Name[1] == 'r') {
+      Info.setAllowsRegister();
+      Name += 1;
+      return true;
+    }
+    return false;
   case 'v':
     // A vector register.
     if (Name[1] == 'r' || Name[1] == 'd' || Name[1] == 'm') {
@@ -122,8 +130,9 @@ bool RISCVTargetInfo::validateAsmConstraint(
 std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
   std::string R;
   switch (*Constraint) {
-  // c* and v* are two-letter constraints on RISC-V.
+  // c*, P*, and v* are all two-letter constraints on RISC-V.
   case 'c':
+  case 'P':
   case 'v':
     R = std::string("^") + std::string(Constraint, 2);
     Constraint += 1;
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm.c b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
index 75b91d3c497c50..eb6e42f3eb9529 100644
--- a/clang/test/CodeGen/RISCV/riscv-inline-asm.c
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
@@ -33,6 +33,19 @@ void test_cf(float f, double d) {
   asm volatile("" : "=cf"(cd) : "cf"(d));
 }
 
+#if __riscv_xlen == 32
+typedef long long double_xlen_t;
+#elif __riscv_xlen == 64
+typedef __int128_t double_xlen_t;
+#endif
+double_xlen_t test_Pr_wide_scalar(double_xlen_t p) {
+// CHECK-LABEL: define{{.*}} {{i128|i64}} @test_Pr_wide_scalar(
+// CHECK: call {{i128|i64}} asm sideeffect "", "=^Pr,^Pr"({{i128|i64}} %{{.*}})
+  double_xlen_t ret;
+  asm volatile("" : "=Pr"(ret) : "Pr"(p));
+  return ret;
+}
+
 void test_I(void) {
 // CHECK-LABEL: define{{.*}} void @test_I()
 // CHECK: call void asm sideeffect "", "I"(i32 2047)
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td
index 493c0cfcab60ce..9c910c0085fce9 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -317,20 +317,23 @@ def riscv_nxv16i8x3  : VTVecTup<384, 3, i8, 220>; // RISCV vector tuple(min_num_
 def riscv_nxv16i8x4  : VTVecTup<512, 4, i8, 221>; // RISCV vector tuple(min_num_elts=16, nf=4)
 def riscv_nxv32i8x2  : VTVecTup<512, 2, i8, 222>; // RISCV vector tuple(min_num_elts=32, nf=2)
 
-def x86mmx    : ValueType<64,   223>;  // X86 MMX value
-def Glue      : ValueType<0,    224>;  // Pre-RA sched glue
-def isVoid    : ValueType<0,    225>;  // Produces no value
-def untyped   : ValueType<8,    226> { // Produces an untyped value
+def riscv_i32_pair : ValueType<64, 223>; // RISCV pair of RV32 GPRs
+def riscv_i64_pair : ValueType<128, 224>; // RISCV pair of RV64 GPRs
+
+def x86mmx    : ValueType<64,   225>;  // X86 MMX value
+def Glue      : ValueType<0,    226>;  // Pre-RA sched glue
+def isVoid    : ValueType<0,    227>;  // Produces no value
+def untyped   : ValueType<8,    228> { // Produces an untyped value
   let LLVMName = "Untyped";
 }
-def funcref   : ValueType<0,    227>;  // WebAssembly's funcref type
-def externref : ValueType<0,    228>;  // WebAssembly's externref type
-def exnref    : ValueType<0,    229>;  // WebAssembly's exnref type
-def x86amx    : ValueType<8192, 230>;  // X86 AMX value
-def i64x8     : ValueType<512,  231>;  // 8 Consecutive GPRs (AArch64)
+def funcref   : ValueType<0,    229>;  // WebAssembly's funcref type
+def externref : ValueType<0,    230>;  // WebAssembly's externref type
+def exnref    : ValueType<0,    231>;  // WebAssembly's exnref type
+def x86amx    : ValueType<8192, 232>;  // X86 AMX value
+def i64x8     : ValueType<512,  233>;  // 8 Consecutive GPRs (AArch64)
 def aarch64svcount
-              : ValueType<16,  232>;  // AArch64 predicate-as-counter
-def spirvbuiltin : ValueType<0, 233>; // SPIR-V's builtin type
+              : ValueType<16,  234>;  // AArch64 predicate-as-counter
+def spirvbuiltin : ValueType<0, 235>; // SPIR-V's builtin type
 
 let isNormalValueType = false in {
 def token      : ValueType<0, 504>;  // TokenTy
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 758b3a5fc526e7..053d8ba098d9e5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5730,7 +5730,8 @@ TargetLowering::ParseConstraints(const DataLayout &DL,
       assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
       if (auto *STy = dyn_cast<StructType>(Call.getType())) {
         OpInfo.ConstraintVT =
-            getSimpleValueType(DL, STy->getElementType(ResNo));
+            getAsmOperandValueType(DL, STy->getElementType(ResNo))
+                .getSimpleVT();
       } else {
         assert(ResNo == 0 && "Asm only has one result!");
         OpInfo.ConstraintVT =
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index e3c746b274dde1..7ce7102fe98a5f 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -177,6 +177,10 @@ std::string EVT::getEVTString() const {
     if (isFloatingPoint())
       return "f" + utostr(getSizeInBits());
     llvm_unreachable("Invalid EVT!");
+  case MVT::riscv_i32_pair:
+    return "riscv_i32_pair";
+  case MVT::riscv_i64_pair:
+    return "riscv_i64_pair";
   case MVT::bf16:      return "bf16";
   case MVT::ppcf128:   return "ppcf128";
   case MVT::isVoid:    return "isVoid";
@@ -214,6 +218,8 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
     assert(isExtended() && "Type is not extended!");
     return LLVMTy;
   case MVT::isVoid:  return Type::getVoidTy(Context);
+  case MVT::riscv_i32_pair: return IntegerType::get(Context, 64);
+  case MVT::riscv_i64_pair: return IntegerType::get(Context, 128);
   case MVT::x86mmx:  return llvm::FixedVectorType::get(llvm::IntegerType::get(Context, 64), 1);
   case MVT::aarch64svcount:
     return TargetExtType::get(Context, "aarch64.svcount");
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 4d46afb8c4ef97..1b23b36a59e0ec 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -481,6 +481,12 @@ struct RISCVOperand final : public MCParsedAsmOperand {
            RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(Reg.RegNum);
   }
 
+  bool isGPRPair() const {
+    return Kind == KindTy::Register &&
+           RISCVMCRegisterClasses[RISCV::GPRPairRegClassID].contains(
+               Reg.RegNum);
+  }
+
   bool isGPRF16() const {
     return Kind == KindTy::Register &&
            RISCVMCRegisterClasses[RISCV::GPRF16RegClassID].contains(Reg.RegNum);
@@ -491,17 +497,17 @@ struct RISCVOperand final : public MCParsedAsmOperand {
            RISCVMCRegisterClasses[RISCV::GPRF32RegClassID].contains(Reg.RegNum);
   }
 
-  bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
-  bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
-  bool isGPRAsFPR32() const { return isGPRF32() && Reg.IsGPRAsFPR; }
-  bool isGPRPairAsFPR() const { return isGPRPair() && Reg.IsGPRAsFPR; }
-
-  bool isGPRPair() const {
+  bool isGPRF64Pair() const {
     return Kind == KindTy::Register &&
-           RISCVMCRegisterClasses[RISCV::GPRPairRegClassID].contains(
+           RISCVMCRegisterClasses[RISCV::GPRF64PairRegClassID].contains(
                Reg.RegNum);
   }
 
+  bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
+  bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
+  bool isGPRAsFPR32() const { return isGPRF32() && Reg.IsGPRAsFPR; }
+  bool isGPRPairAsFPR64() const { return isGPRF64Pair() && Reg.IsGPRAsFPR; }
+
   static bool evaluateConstantImm(const MCExpr *Expr, int64_t &Imm,
                                   RISCVMCExpr::VariantKind &VK) {
     if (auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
@@ -2399,7 +2405,7 @@ ParseStatus RISCVAsmParser::parseGPRPairAsFPR64(OperandVector &Operands) {
   const MCRegisterInfo *RI = getContext().getRegisterInfo();
   MCRegister Pair = RI->getMatchingSuperReg(
       Reg, RISCV::sub_gpr_even,
-      &RISCVMCRegisterClasses[RISCV::GPRPairRegClassID]);
+      &RISCVMCRegisterClasses[RISCV::GPRF64PairRegClassID]);
   Operands.push_back(RISCVOperand::createReg(Pair, S, E, /*isGPRAsFPR=*/true));
   return ParseStatus::Success;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index dc3f8254cb4e00..1abb693eb47665 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -953,6 +953,35 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     ReplaceNode(Node, Res);
     return;
   }
+  case RISCVISD::BuildGPRPair: {
+    SDValue Ops[] = {
+        CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
+        Node->getOperand(0),
+        CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32),
+        Node->getOperand(1),
+        CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)};
+
+    SDNode *N = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
+                                       Subtarget->getXLenPairVT(), Ops);
+    ReplaceNode(Node, N);
+    return;
+  }
+  case RISCVISD::SplitGPRPair: {
+    if (!SDValue(Node, 0).use_empty()) {
+      SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_even, DL, VT,
+                                                  Node->getOperand(0));
+      ReplaceUses(SDValue(Node, 0), Lo);
+    }
+
+    if (!SDValue(Node, 1).use_empty()) {
+      SDValue Hi = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_odd, DL, VT,
+                                                  Node->getOperand(0));
+      ReplaceUses(SDValue(Node, 1), Hi);
+    }
+
+    CurDAG->RemoveDeadNode(Node);
+    return;
+  }
   case RISCVISD::BuildPairF64: {
     if (!Subtarget->hasStdExtZdinx())
       break;
@@ -960,7 +989,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     assert(!Subtarget->is64Bit() && "Unexpected subtarget");
 
     SDValue Ops[] = {
-        CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
+        CurDAG->getTargetConstant(RISCV::GPRF64PairRegClassID, DL, MVT::i32),
         Node->getOperand(0),
         CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32),
         Node->getOperand(1),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 69112d868bff82..a439cccb38f345 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -114,9 +114,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   }
 
   MVT XLenVT = Subtarget.getXLenVT();
+  MVT XLenPairVT = Subtarget.getXLenPairVT();
 
   // Set up the register classes.
   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
+  addRegisterClass(XLenPairVT, &RISCV::GPRPairRegClass);
 
   if (Subtarget.hasStdExtZfhmin())
     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
@@ -134,7 +136,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     if (Subtarget.is64Bit())
       addRegisterClass(MVT::f64, &RISCV::GPRRegClass);
     else
-      addRegisterClass(MVT::f64, &RISCV::GPRPairRegClass);
+      addRegisterClass(MVT::f64, &RISCV::GPRF64PairRegClass);
   }
 
   static const MVT::SimpleValueType BoolVecVTs[] = {
@@ -296,6 +298,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setCondCodeAction(ISD::SETLE, XLenVT, Expand);
   }
 
+  if (Subtarget.is64Bit())
+    setOperationAction(ISD::BITCAST, MVT::i128, Custom);
+  else
+    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
+
   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
 
   setOperationAction(ISD::VASTART, MVT::Other, Custom);
@@ -2224,6 +2231,17 @@ bool RISCVTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
   return Index == 0 || Index == ResElts;
 }
 
+EVT RISCVTargetLowering::getAsmOperandValueType(const DataLayout &DL, Type *Ty,
+                                                bool AllowUnknown) const {
+  if (!Subtarget.is64Bit() && Ty->isIntegerTy(64))
+    return MVT::riscv_i32_pair;
+
+  if (Subtarget.is64Bit() && Ty->isIntegerTy(128))
+    return MVT::riscv_i64_pair;
+
+  return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
+}
+
 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
@@ -2238,6 +2256,17 @@ MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
   return PartVT;
 }
 
+unsigned
+RISCVTargetLowering::getNumRegisters(LLVMContext &Context, EVT VT,
+                                     std::optional<MVT> RegisterVT) const {
+  // Pair inline assembly operand
+  if (VT == (Subtarget.is64Bit() ? MVT::i128 : MVT::i64) && RegisterVT &&
+      *RegisterVT == Subtarget.getXLenPairVT())
+    return 1;
+
+  return TargetLowering::getNumRegisters(Context, VT, RegisterVT);
+}
+
 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                            CallingConv::ID CC,
                                                            EVT VT) const {
@@ -2776,6 +2805,19 @@ RISCVTargetLowering::computeVLMAXBounds(MVT VecVT,
   return std::make_pair(MinVLMAX, MaxVLMAX);
 }
 
+bool RISCVTargetLowering::isLoadBitCastBeneficial(
+    EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
+    const MachineMemOperand &MMO) const {
+  // We want to leave `bitcasts` to/from MVT::riscv_i<xlen>_pair separate from
+  // loads/stores so they can be turned into BuildGPRPair/SplitGPRPair nodes.
+  if (LoadVT == (Subtarget.is64Bit() ? MVT::i128 : MVT::i64) &&
+      BitcastVT == Subtarget.getXLenPairVT())
+    return false;
+
+  return TargetLoweringBase::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG,
+                                                     MMO);
+}
+
 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
 // of either is (currently) supported. This can get us into an infinite loop
 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
@@ -6413,6 +6455,13 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
       std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, MVT::i32, MVT::i32);
       return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
     }
+    if (VT == Subtarget.getXLenPairVT() && Op0VT.isScalarInteger() &&
+        Op0VT.getSizeInBits() == 2 * Subtarget.getXLen()) {
+      SDValue Lo, Hi;
+      std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, XLenVT, XLenVT);
+      return DAG.getNode(RISCVISD::BuildGPRPair, DL, Subtarget.getXLenPairVT(),
+                         Lo, Hi);
+    }
 
     // Consider other scalar<->scalar casts as legal if the types are legal.
     // Otherwise expand them.
@@ -12886,6 +12935,14 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
                                    NewReg.getValue(0), NewReg.getValue(1));
       Results.push_back(RetReg);
+    } else if (VT.isInteger() &&
+               VT.getSizeInBits() == 2 * Subtarget.getXLen() &&
+               Op0VT == Subtarget.getXLenPairVT()) {
+      SDValue NewReg = DAG.getNode(RISCVISD::SplitGPRPair, DL,
+                                   DAG.getVTList(XLenVT, XLenVT), Op0);
+      SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, VT, NewReg.getValue(0),
+                                   NewReg.getValue(1));
+      Results.push_back(RetReg);
     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
                isTypeLegal(Op0VT)) {
       // Custom-legalize bitcasts from fixed-length vector types to illegal
@@ -20130,6 +20187,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(TAIL)
   NODE_NAME_CASE(SELECT_CC)
   NODE_NAME_CASE(BR_CC)
+  NODE_NAME_CASE(BuildGPRPair)
+  NODE_NAME_CASE(SplitGPRPair)
   NODE_NAME_CASE(BuildPairF64)
   NODE_NAME_CASE(SplitF64)
   NODE_NAME_CASE(ADD_LO)
@@ -20408,6 +20467,8 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
       return C_RegisterClass;
     if (Constraint == "cr" || Constraint == "cf")
       return C_RegisterClass;
+    if (Constraint == "Pr")
+      return C_RegisterClass;
   }
   return TargetLowering::getConstraintType(Constraint);
 }
@@ -20429,7 +20490,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
         return std::make_pair(0U, &RISCV::GPRF32NoX0RegClass);
       if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-        return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
+        return std::make_pair(0U, &RISCV::GPRF64PairNoX0RegClass);
       return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
     case 'f':
       if (VT == MVT::f16) {
@@ -20446,7 +20507,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
         if (Subtarget.hasStdExtD())
           return std::make_pair(0U, &RISCV::FPR64RegClass);
         if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-          return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
+          return std::make_pair(0U, &RISCV::GPRF64PairNoX0RegClass);
         if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
           return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
       }
@@ -20488,7 +20549,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
     if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
       return std::make_pair(0U, &RISCV::GPRF32CRegClass);
     if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-      return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+      return std::make_pair(0U, &RISCV::GPRF64PairCRegClass);
     if (!VT.isVector())
       return std::make_pair(0U, &RISCV::GPRCRegClass);
   } else if (Constraint == "cf") {
@@ -20506,10 +20567,12 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (Subtarget.hasStdExtD())
         return std::make_pair(0U, &RISCV::FPR64CRegClass);
       if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-        return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+        return std::make_pair(0U, &RISCV::GPRF64PairCRegClass);
       if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
         return std::make_pair(0U, &RISCV::GPRCRegClass);
     }
+  } else if (Constraint == "Pr") {
+    return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
   }
 
   // Clang will correctly decode the usage of register name aliases into their
@@ -20670,7 +20733,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
   // Subtarget into account.
   if (Res.second == &RISCV::GPRF16RegClass ||
       Res.second == &RISCV::GPRF32RegClass ||
-      Res.second == &RISCV::GPRPairRegClass)
+      Res.second == &RISCV::GPRF64PairRegClass)
     return std::make_pair(Res.first, &RISCV::GPRRegClass);
 
   return Res;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 0b07ad7d7a423f..deaefafc73535e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -44,6 +44,18 @@ enum NodeType : unsigned {
   SELECT_CC,
   BR_CC,
 
+  /// Turn a pair of `i<xlen>`s into a `riscv_i<xlen>_pair`.
+  /// - Output: `riscv_i<xlen>_pair`
+  /// - Input 0: `i<xlen>` low-order bits, for even register.
+  /// - Input 1: `i<xlen>` high-order bits, for odd register.
+  BuildGPRPair,
+
+  /// Turn a `riscv_i<xlen>_pair` into a pair of `i<xlen>`s.
+  /// - Output 0: `i<xlen>` low-order bits, from even register.
+  /// - Output 1: `i<xlen>` high-order bits, from odd register.
+  /// - Input: `riscv_i<xlen>_pair`
+  SplitGPRPair,
+
   /// Turns a pair of `i32`s into an `f64`. Needed for rv32d/ilp32.
   /// - Output: `f64`.
   /// - Input 0: low-order bits (31-0) (as `i32`), for even register.
@@ -544,11 +556,19 @@ class RISCVTargetLowering : public TargetLowering {
 
   bool softPromoteHalfType() const override { return true; }
 
+  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
+                             bool AllowUnknown = false) const override;
+
   /// Return the register type for a given MVT, ensuring vectors are treated
   /// as a series of gpr sized integers.
   MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                     EVT VT) const override;
 
+  /// Return the number of registers for a given MVT, for inline assembly
+  unsigned
+  getNumRegisters(LLVMContext &Context, EVT VT,
+                  std::optional<MVT> RegisterVT = std::nullopt) const override;
+
   /// Return the number of registers for a given MVT, ensuring vectors are
   /// treated as a series of gpr sized integers.
   unsigned getNumRegistersForCallingConv(LLVMContext &Context,
@@ -584,6 +604,10 @@ class RISCVTargetLowering : public TargetLowering {
     return false;
   }
 
+  bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
+                               const SelectionDAG &DAG,
+                               const MachineMemOperand &MMO) const override;
+
   bool
   shouldExpandBuildVectorWithShuffles(EVT VT,
                                       unsigned DefinedValues) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index 5c8977142ad1b4..34f86534ce9fd3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -36,7 +36,7 @@ def AddrRegImmINX : ComplexPattern<iPTR, 2, "SelectAddrRegImmRV32Zdinx">;
 def GPRPairAsFPR : AsmOperandClass {
   let Name = "GPRPairAsFPR";
   let ParserMethod = "parseGPRPairAsFPR64";
-  let PredicateMethod = "isGPRPairAsFPR";
+  let PredicateMethod = "isGPRPairAsFPR64";
   let RenderMethod = "addRegOperands";
 }
 
@@ -52,7 +52,7 @@ def FPR64INX : RegisterOperand<GPR> {
   let DecoderMethod = "DecodeGPRRegisterClass";
 }
 
-def FPR64IN32X : RegisterOperand<GPRPair> {
+def FPR64IN32X : RegisterOperand<GPRF64Pair> {
   let ParserMatchClass = GPRPairAsFPR;
 }
 
@@ -523,15 +523,15 @@ def PseudoFROUND_D_IN32X : PseudoFROUND<FPR64IN32X, f64>;
 
 /// Loads
 let isCall = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in
-def PseudoRV32ZdinxLD : Pseudo<(outs GPRPair:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
+def PseudoRV32ZdinxLD : Pseudo<(outs GPRF64Pair:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
 def : Pat<(f64 (load (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12))),
           (PseudoRV32ZdinxLD GPR:$rs1, simm12:$imm12)>;
 
 /// Stores
 let isCall = 0, mayLoad = 0, mayStore = 1, Size = 8, isCodeGenOnly = 1 in
-def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPair:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
-def : Pat<(store (f64 GPRPair:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
-          (PseudoRV32ZdinxSD GPRPair:$rs2, GPR:$rs1, simm12:$imm12)>;
+def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRF64Pair:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
+def : Pat<(store (f64 GPRF64Pair:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
+          (PseudoRV32ZdinxSD GPRF64Pair:$rs2, GPR:$rs1, simm12:$imm12)>;
 } // Predicates = [HasStdExtZdinx, IsRV32]
 
 let Predicates = [HasStdExtD] in {
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 685f04213afa86..0acca502f10c18 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -74,7 +74,10 @@ def sub_gpr_odd  : SubRegIndex<32, 32> {
 }
 } // Namespace = "RISCV"
 
-// Integer registers
+//===----------------------------------------------------------------------===//
+//  General Purpose Registers (aka Integer Registers)
+//===----------------------------------------------------------------------===//
+
 // CostPerUse is set higher for registers that may not be compressible as they
 // are not part of GPRC, the most restrictive register class used by the
 // compressed instruction set. This will influence the greedy register
@@ -210,6 +213,7 @@ def XLenFVT : ValueTypeByHwMode<[RV64],
                                 [f64]>;
 def XLenPairFVT : ValueTypeByHwMode<[RV32],
                                     [f64]>;
+
 def XLenRI : RegInfoByHwMode<
       [RV32,              RV64],
       [RegInfo<32,32,32>, RegInfo<64,64,64>]>;
@@ -279,7 +283,67 @@ def SR07 : GPRRegisterClass<(add (sequence "X%u", 8, 9),
 
 def GPRX1X5 :  GPRRegisterClass<(add X1, X5)>;
 
-// Floating point registers
+//===----------------------------------------------------------------------===//
+//  Even-Odd GPR Pairs
+//===----------------------------------------------------------------------===//
+
+def XLenPairVT : ValueTypeByHwMode<
+      [RV32,           RV64],
+      [riscv_i32_pair, riscv_i64_pair]>;
+
+def XLenPairRI : RegInfoByHwMode<
+      [RV32,                RV64],
+      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
+
+// Dummy zero register for use in the register pair containing X0 (as X1 is
+// neither read nor written when the X0 register pair is used).
+def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
+
+// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
+// register's existence from changing codegen (due to the regPressureSetLimit
+// for the GPR register class being altered).
+def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;
+
+let RegAltNameIndices = [ABIRegAltName] in {
+  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
+                                    [X0, DUMMY_REG_PAIR_WITH_X0],
+                                    X0.AltNames> {
+    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
+    let CoveredBySubRegs = 1;
+  }
+  foreach I = 1-15 in {
+    defvar Index = !shl(I, 1);
+    defvar IndexP1 = !add(Index, 1);
+    defvar Reg = !cast<Register>("X"#Index);
+    defvar RegP1 = !cast<Register>("X"#IndexP1);
+    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
+                                                          Reg.AsmName,
+                                                          [Reg, RegP1],
+                                                          Reg.AltNames> {
+      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
+      let CoveredBySubRegs = 1;
+    }
+  }
+}
+
+let RegInfos = XLenPairRI,
+    DecoderMethod = "DecodeGPRPairRegisterClass" in {
+def GPRPair : RISCVRegisterClass<[XLenPairVT], 64, (add
+    X10_X11, X12_X13, X14_X15, X16_X17,
+    X6_X7,
+    X28_X29, X30_X31,
+    X8_X9,
+    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
+    X0_Pair, X2_X3, X4_X5
+)>;
+
+def GPRPairNoX0 : RISCVRegisterClass<[XLenPairVT], 64, (sub GPRPair, X0_Pair)>;
+} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
+
+//===----------------------------------------------------------------------===//
+//  Floating Point Registers
+//===----------------------------------------------------------------------===//
+
 let RegAltNameIndices = [ABIRegAltName] in {
   def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
   def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
@@ -373,8 +437,51 @@ def FPR64C : RISCVRegisterClass<[f64], 64, (add
   (sequence "F%u_D", 8, 9)
 )>;
 
+//===----------------------------------------------------------------------===//
+// GPR Classes for "H/F/D in X"
+//===----------------------------------------------------------------------===//
+
+// 16-bit GPR sub-register class used by Zhinx instructions.
+def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
+                                                (sequence "X%u_H", 5, 7),
+                                                (sequence "X%u_H", 28, 31),
+                                                (sequence "X%u_H", 8, 9),
+                                                (sequence "X%u_H", 18, 27),
+                                                (sequence "X%u_H", 0, 4))>;
+def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
+                                                 (sequence "X%u_H", 8, 9))>;
+def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;
+
+def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
+                                                (sequence "X%u_W", 5, 7),
+                                                (sequence "X%u_W", 28, 31),
+                                                (sequence "X%u_W", 8, 9),
+                                                (sequence "X%u_W", 18, 27),
+                                                (sequence "X%u_W", 0, 4))>;
+def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
+                                                 (sequence "X%u_W", 8, 9))>;
+def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
+
+let DecoderMethod = "DecodeGPRPairRegisterClass" in
+def GPRF64Pair : RISCVRegisterClass<[XLenPairFVT], 64, (add
+    X10_X11, X12_X13, X14_X15, X16_X17,
+    X6_X7,
+    X28_X29, X30_X31,
+    X8_X9,
+    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
+    X0_Pair, X2_X3, X4_X5
+)>;
+
+def GPRF64PairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
+  X10_X11, X12_X13, X14_X15, X8_X9
+)>;
+
+def GPRF64PairNoX0 : RISCVRegisterClass<[XLenPairFVT], 64, (sub GPRF64Pair, X0_Pair)>;
+
+//===----------------------------------------------------------------------===//
 // Vector type mapping to LLVM types.
-//
+//===----------------------------------------------------------------------===//
+
 // The V vector extension requires that VLEN >= 128 and <= 65536.
 // Additionally, the only supported ELEN values are 32 and 64,
 // thus `vscale` can be defined as VLEN/64,
@@ -534,7 +641,10 @@ class VRegList<list<dag> LIn, int start, int nf, int lmul, bit isV0> {
         !foreach(i, IndexSet<start, nf, lmul, isV0>.R, "v" # i));
 }
 
+//===----------------------------------------------------------------------===//
 // Vector registers
+//===----------------------------------------------------------------------===//
+
 foreach Index = !range(0, 32, 1) in {
   def V#Index : RISCVReg<Index, "v"#Index>, DwarfRegNum<[!add(Index, 96)]>;
 }
@@ -652,80 +762,6 @@ def VRM8NoV0 : VReg<VM8VTs, (sub VRM8, V0M8), 8>;
 
 def VMV0 : VReg<VMaskVTs, (add V0), 1>;
 
-// 16-bit GPR sub-register class used by Zhinx instructions.
-def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
-                                                (sequence "X%u_H", 5, 7),
-                                                (sequence "X%u_H", 28, 31),
-                                                (sequence "X%u_H", 8, 9),
-                                                (sequence "X%u_H", 18, 27),
-                                                (sequence "X%u_H", 0, 4))>;
-def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
-                                                 (sequence "X%u_H", 8, 9))>;
-def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;
-
-def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
-                                                (sequence "X%u_W", 5, 7),
-                                                (sequence "X%u_W", 28, 31),
-                                                (sequence "X%u_W", 8, 9),
-                                                (sequence "X%u_W", 18, 27),
-                                                (sequence "X%u_W", 0, 4))>;
-def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
-                                                 (sequence "X%u_W", 8, 9))>;
-def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
-
-def XLenPairRI : RegInfoByHwMode<
-      [RV32,                RV64],
-      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
-
-// Dummy zero register for use in the register pair containing X0 (as X1 is
-// not read to or written when the X0 register pair is used).
-def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
-
-// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
-// register's existence from changing codegen (due to the regPressureSetLimit
-// for the GPR register class being altered).
-def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;
-
-let RegAltNameIndices = [ABIRegAltName] in {
-  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
-                                    [X0, DUMMY_REG_PAIR_WITH_X0],
-                                    X0.AltNames> {
-    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
-    let CoveredBySubRegs = 1;
-  }
-  foreach I = 1-15 in {
-    defvar Index = !shl(I, 1);
-    defvar IndexP1 = !add(Index, 1);
-    defvar Reg = !cast<Register>("X"#Index);
-    defvar RegP1 = !cast<Register>("X"#IndexP1);
-    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
-                                                          Reg.AsmName,
-                                                          [Reg, RegP1],
-                                                          Reg.AltNames> {
-      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
-      let CoveredBySubRegs = 1;
-    }
-  }
-}
-
-let RegInfos = XLenPairRI,
-    DecoderMethod = "DecodeGPRPairRegisterClass" in {
-def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
-    X10_X11, X12_X13, X14_X15, X16_X17,
-    X6_X7,
-    X28_X29, X30_X31,
-    X8_X9,
-    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
-    X0_Pair, X2_X3, X4_X5
-)>;
-
-def GPRPairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
-  X10_X11, X12_X13, X14_X15, X8_X9
-)>;
-
-def GPRPairNoX0 : RISCVRegisterClass<[XLenPairFVT], 64, (sub GPRPair, X0_Pair)>;
-} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
-
 // The register class is added for inline assembly for vector mask types.
 def VM : VReg<VMaskVTs, (add VR), 1>;
 
@@ -770,7 +806,10 @@ foreach m = LMULList in {
   }
 }
 
-// Special registers
+//===----------------------------------------------------------------------===//
+// Special Registers
+//===----------------------------------------------------------------------===//
+
 def FFLAGS : RISCVReg<0, "fflags">;
 def FRM    : RISCVReg<0, "frm">;
 
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index bf9ed3f3d71655..abceacffe249f4 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -201,6 +201,10 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
     return Min;
   }
 
+  MVT getXLenPairVT() const {
+    return is64Bit() ? MVT::riscv_i64_pair : MVT::riscv_i32_pair;
+  }
+
   /// If the ElementCount or TypeSize \p X is scalable and VScale (VLEN) is
   /// exactly known, returns \p X converted to a fixed quantity. Otherwise
   /// returns \p X unmodified.
diff --git a/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
new file mode 100644
index 00000000000000..a7f121c67e4abd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define i64 @test_Pr_wide_scalar_simple(i64 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_simple:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i64 %0)
+  ret i64 %1
+}
+
+define i32 @test_Pr_wide_scalar_with_ops(i32 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or a0, a2, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = zext i32 %0 to i64
+  %2 = shl i64 %1, 32
+  %3 = or i64 %1, %2
+  %4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i64 %3)
+  %5 = trunc i64 %4 to i32
+  %6 = lshr i64 %4, 32
+  %7 = trunc i64 %6 to i32
+  %8 = or i32 %5, %7
+  ret i32 %8
+}
+
+define i64 @test_Pr_wide_scalar_inout(ptr %0, i64 noundef %1) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_inout:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    mv a3, a2
+; CHECK-NEXT:    sw a0, 12(sp)
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:    sw a1, 0(sp)
+; CHECK-NEXT:    sw a3, 4(sp)
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a0; a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    sw a0, 12(sp)
+; CHECK-NEXT:    sw a2, 0(sp)
+; CHECK-NEXT:    sw a3, 4(sp)
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+entry:
+  %2 = alloca ptr, align 4
+  %3 = alloca i64, align 8
+  store ptr %0, ptr %2, align 4
+  store i64 %1, ptr %3, align 8
+  %4 = load ptr, ptr %2, align 4
+  %5 = load i64, ptr %3, align 8
+  %6 = call { ptr, i64 } asm sideeffect "/* $0; $1 */", "=r,=^Pr,0,1"(ptr %4, i64 %5)
+  %7 = extractvalue { ptr, i64} %6, 0
+  %8 = extractvalue { ptr, i64 } %6, 1
+  store ptr %7, ptr %2, align 4
+  store i64 %8, ptr %3, align 8
+  %9 = load i64, ptr %3, align 8
+  ret i64 %9
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
new file mode 100644
index 00000000000000..d8b4b2e21c4f84
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define i128 @test_Pr_wide_scalar_simple(i128 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_simple:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i128 %0)
+  ret i128 %1
+}
+
+define i64 @test_Pr_wide_scalar_with_ops(i64 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or a0, a2, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = zext i64 %0 to i128
+  %2 = shl i128 %1, 64
+  %3 = or i128 %1, %2
+  %4 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i128 %3)
+  %5 = trunc i128 %4 to i64
+  %6 = lshr i128 %4, 64
+  %7 = trunc i128 %6 to i64
+  %8 = or i64 %5, %7
+  ret i64 %8
+}
+
+define i128 @test_Pr_wide_scalar_inout(ptr %0, i128 noundef %1) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_inout:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -32
+; CHECK-NEXT:    mv a3, a2
+; CHECK-NEXT:    sd a0, 24(sp)
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:    sd a1, 0(sp)
+; CHECK-NEXT:    sd a3, 8(sp)
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a0; a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    sd a0, 24(sp)
+; CHECK-NEXT:    sd a2, 0(sp)
+; CHECK-NEXT:    sd a3, 8(sp)
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:    addi sp, sp, 32
+; CHECK-NEXT:    ret
+entry:
+  %2 = alloca ptr, align 8
+  %3 = alloca i128, align 16
+  store ptr %0, ptr %2, align 8
+  store i128 %1, ptr %3, align 16
+  %4 = load ptr, ptr %2, align 8
+  %5 = load i128, ptr %3, align 16
+  %6 = call { ptr, i128 } asm sideeffect "/* $0; $1 */", "=r,=^Pr,0,1"(ptr %4, i128 %5)
+  %7 = extractvalue { ptr, i128} %6, 0
+  %8 = extractvalue { ptr, i128 } %6, 1
+  store ptr %7, ptr %2, align 8
+  store i128 %8, ptr %3, align 16
+  %9 = load i128, ptr %3, align 16
+  ret i128 %9
+}

>From 57069e9e4ace019bc9b8279c3d0ce078d639312e Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Thu, 24 Oct 2024 06:55:30 -0700
Subject: [PATCH 3/3] [RISCV] Allow 'Pr' for f64 on rv32 D in X

I think it's not unreasonable that people writing code targeting only RV32
with Zdinx would expect to be able to use `Pr` for double values, given that
these are actually passed in a pair of GPRs.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  2 ++
 .../CodeGen/RISCV/zdinx-asm-constraint.ll     | 26 +++++++++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a439cccb38f345..33aeb1aa718e85 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20572,6 +20572,8 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
         return std::make_pair(0U, &RISCV::GPRCRegClass);
     }
   } else if (Constraint == "Pr") {
+    if (VT == MVT::f64 && !Subtarget.is64Bit() && Subtarget.hasStdExtZdinx())
+      return std::make_pair(0U, &RISCV::GPRF64PairCRegClass);
     return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
   }
 
diff --git a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
index 18bd41a210f53f..9f8acb6370c6f5 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
@@ -26,6 +26,32 @@ entry:
   ret void
 }
 
+define dso_local void @zdinx_asm_Pr(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
+; CHECK-LABEL: zdinx_asm_Pr:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    mv a5, a4
+; CHECK-NEXT:    mv s1, a2
+; CHECK-NEXT:    mv a4, a3
+; CHECK-NEXT:    mv s0, a1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    fsgnjx.d a2, s0, a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    sw a2, 8(a0)
+; CHECK-NEXT:    sw a3, 12(a0)
+; CHECK-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+entry:
+  %arrayidx = getelementptr inbounds double, ptr %a, i32 1
+  %0 = tail call double asm "fsgnjx.d $0, $1, $2", "=^Pr,^Pr,^Pr"(double %b, double %c)
+  store double %0, ptr %arrayidx, align 8
+  ret void
+}
+
 define dso_local void @zfinx_asm(ptr nocapture noundef writeonly %a, float noundef %b, float noundef %c) nounwind {
 ; CHECK-LABEL: zfinx_asm:
 ; CHECK:       # %bb.0: # %entry



More information about the llvm-commits mailing list