[clang] [llvm] [RISCV] Inline Assembly Support for GPR Pairs ('Pr') (PR #112983)

Sam Elliott via cfe-commits cfe-commits at lists.llvm.org
Tue Oct 22 12:48:20 PDT 2024


https://github.com/lenary updated https://github.com/llvm/llvm-project/pull/112983

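For context, the branch-relaxation tests in the patch below force out-of-range branches by emitting large inline-asm payloads (".space" directives of 4 KiB or 1 MiB) so that conditional branches and jal instructions must be relaxed. A minimal sketch of that pattern, not part of the patch and using a hypothetical function name:

define void @force_relaxation(i1 %c) nounwind {
  br i1 %c, label %big, label %done
big:
  ; Pad with more than 1 MiB so a direct branch/jal to %done cannot reach.
  call void asm sideeffect ".space 1048576", ""()
  br label %done
done:
  ret void
}
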
>From daac613c31425a0ee996170a73016ff18f1f215e Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Tue, 22 Oct 2024 12:36:47 -0700
Subject: [PATCH 1/2] [RISCV][NFC] Split branch-relaxation.ll

The existing branch-relaxation.ll test does not cope well with my later
changes, but only because of the tests that its comments tell you to
ignore. Instead, split it into two separate files, each with correct and
expected inputs, for testing.
---
 .../CodeGen/RISCV/branch-relaxation-rv32.ll   | 1010 ++++++++++++++++
 .../CodeGen/RISCV/branch-relaxation-rv64.ll   | 1013 +++++++++++++++++
 2 files changed, 2023 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll

diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
new file mode 100644
index 00000000000000..69aaa47e7482aa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
@@ -0,0 +1,1010 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -filetype=obj < %s \
+; RUN:   -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs \
+; RUN:   -filetype=obj < %s -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB0_1
+; CHECK-NEXT:    j .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 4096
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB0_2: # %tail
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %tail
+
+iftrue:
+  call void asm sideeffect ".space 4096", ""()
+  br label %tail
+
+tail:
+  ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB1_1
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    jump .LBB1_2, a0
+; CHECK-NEXT:  .LBB1_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %jmp
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB1_3: # %tail
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %jmp
+
+jmp:
+  call void asm sideeffect "", ""()
+  br label %tail
+
+iftrue:
+  call void asm sideeffect "", ""()
+  br label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %tail
+
+tail:
+  ret i32 1
+}
+
+define void @relax_jal_spill_32() {
+; CHECK-LABEL: relax_jal_spill_32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB2_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB2_4, s11
+; CHECK-NEXT:  .LBB2_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB2_2
+; CHECK-NEXT:  .LBB2_4: # %branch_2
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB2_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i32 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_32_adjust_spill_slot() {
+  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+  ; is out of the range of a 12-bit signed integer, check whether the spill
+  ; slot is adjusted to be close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_32_adjust_spill_slot:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -2032
+; CHECK-NEXT:    .cfi_def_cfa_offset 2032
+; CHECK-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 2012(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 2008(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 2004(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 2000(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 1996(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 1992(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 1988(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 1984(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 1980(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    addi s0, sp, 2032
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    lui a0, 2
+; CHECK-NEXT:    addi a0, a0, -2032
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    srli a0, sp, 12
+; CHECK-NEXT:    slli sp, a0, 12
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB3_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB3_4, s11
+; CHECK-NEXT:  .LBB3_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB3_2
+; CHECK-NEXT:  .LBB3_4: # %branch_2
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB3_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    addi sp, s0, -2032
+; CHECK-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 2012(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 2008(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 2004(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 2000(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 1996(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 1992(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 2032
+; CHECK-NEXT:    ret
+  %stack_obj = alloca i32, align 4096
+
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i32 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_32_restore_block_correspondence() {
+; CHECK-LABEL: relax_jal_spill_32_restore_block_correspondence:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    bne t5, t6, .LBB4_2
+; CHECK-NEXT:    j .LBB4_1
+; CHECK-NEXT:  .LBB4_8: # %dest_1
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB4_1: # %dest_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB4_3
+; CHECK-NEXT:  .LBB4_2: # %cond_2
+; CHECK-NEXT:    bne t3, t4, .LBB4_5
+; CHECK-NEXT:  .LBB4_3: # %dest_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB4_4: # %dest_3
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_5: # %cond_3
+; CHECK-NEXT:    beq t1, t2, .LBB4_4
+; CHECK-NEXT:  # %bb.6: # %space
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  # %bb.7: # %space
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB4_8, s11
+entry:
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  br label %cond_1
+
+cond_1:
+  %cmp1 = icmp eq i32 %t5, %t6
+  br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+  %cmp2 = icmp eq i32 %t3, %t4
+  br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+  %cmp3 = icmp eq i32 %t1, %t2
+  br i1 %cmp3, label %dest_3, label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %dest_1
+
+dest_1:
+  call void asm sideeffect "# dest 1", ""()
+  br label %dest_2
+
+dest_2:
+  call void asm sideeffect "# dest 2", ""()
+  br label %dest_3
+
+dest_3:
+  call void asm sideeffect "# dest 3", ""()
+  br label %tail
+
+tail:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
new file mode 100644
index 00000000000000..90ef390ab68873
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
@@ -0,0 +1,1013 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -filetype=obj < %s \
+; RUN:   -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs \
+; RUN:   -filetype=obj < %s -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB0_1
+; CHECK-NEXT:    j .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 4096
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB0_2: # %tail
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %tail
+
+iftrue:
+  call void asm sideeffect ".space 4096", ""()
+  br label %tail
+
+tail:
+  ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB1_1
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    jump .LBB1_2, a0
+; CHECK-NEXT:  .LBB1_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %jmp
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB1_3: # %tail
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %jmp
+
+jmp:
+  call void asm sideeffect "", ""()
+  br label %tail
+
+iftrue:
+  call void asm sideeffect "", ""()
+  br label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %tail
+
+tail:
+  ret i32 1
+}
+
+
+define void @relax_jal_spill_64() {
+;
+; CHECK-LABEL: relax_jal_spill_64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -112
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB2_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB2_4, s11
+; CHECK-NEXT:  .LBB2_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB2_2
+; CHECK-NEXT:  .LBB2_4: # %branch_2
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB2_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    ret
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i64 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_64_adjust_spill_slot() {
+;
+  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+  ; is out of the range of a 12-bit signed integer, check whether the spill
+  ; slot is adjusted to be close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_64_adjust_spill_slot:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -2032
+; CHECK-NEXT:    .cfi_def_cfa_offset 2032
+; CHECK-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 1984(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 1976(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 1968(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 1960(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 1952(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 1944(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 1936(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 1928(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    addi s0, sp, 2032
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    lui a0, 2
+; CHECK-NEXT:    addiw a0, a0, -2032
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    srli a0, sp, 12
+; CHECK-NEXT:    slli sp, a0, 12
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB3_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB3_4, s11
+; CHECK-NEXT:  .LBB3_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB3_2
+; CHECK-NEXT:  .LBB3_4: # %branch_2
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB3_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    addi sp, s0, -2032
+; CHECK-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 1984(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 1976(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 1968(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 1960(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 1952(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 2032
+; CHECK-NEXT:    ret
+  %stack_obj = alloca i64, align 4096
+
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i64 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_64_restore_block_correspondence() {
+;
+; CHECK-LABEL: relax_jal_spill_64_restore_block_correspondence:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -112
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    bne t5, t6, .LBB4_2
+; CHECK-NEXT:    j .LBB4_1
+; CHECK-NEXT:  .LBB4_8: # %dest_1
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB4_1: # %dest_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB4_3
+; CHECK-NEXT:  .LBB4_2: # %cond_2
+; CHECK-NEXT:    bne t3, t4, .LBB4_5
+; CHECK-NEXT:  .LBB4_3: # %dest_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB4_4: # %dest_3
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_5: # %cond_3
+; CHECK-NEXT:    beq t1, t2, .LBB4_4
+; CHECK-NEXT:  # %bb.6: # %space
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  # %bb.7: # %space
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB4_8, s11
+entry:
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  br label %cond_1
+
+cond_1:
+  %cmp1 = icmp eq i64 %t5, %t6
+  br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+  %cmp2 = icmp eq i64 %t3, %t4
+  br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+  %cmp3 = icmp eq i64 %t1, %t2
+  br i1 %cmp3, label %dest_3, label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %dest_1
+
+dest_1:
+  call void asm sideeffect "# dest 1", ""()
+  br label %dest_2
+
+dest_2:
+  call void asm sideeffect "# dest 2", ""()
+  br label %dest_3
+
+dest_3:
+  call void asm sideeffect "# dest 3", ""()
+  br label %tail
+
+tail:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}

>From 2e939b66c68ea24ded1acee077de5940356fb3b4 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Tue, 22 Oct 2024 12:37:48 -0700
Subject: [PATCH 2/2] [RISCV] GPR Pairs for Inline Asm

---
 clang/lib/Basic/Targets/RISCV.cpp             |  11 +-
 clang/test/CodeGen/RISCV/riscv-inline-asm.c   |  13 ++
 llvm/include/llvm/CodeGen/ValueTypes.td       |  25 ++-
 llvm/lib/CodeGen/ValueTypes.cpp               |   6 +
 .../Target/RISCV/AsmParser/RISCVAsmParser.cpp |  22 +-
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   |  31 ++-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  51 ++++-
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |  16 ++
 llvm/lib/Target/RISCV/RISCVInstrInfoD.td      |  12 +-
 llvm/lib/Target/RISCV/RISCVRegisterInfo.td    | 195 +++++++++++-------
 llvm/lib/Target/RISCV/RISCVSubtarget.h        |   4 +
 .../CodeGen/RISCV/rv32-inline-asm-pairs.ll    |  38 ++++
 .../CodeGen/RISCV/rv64-inline-asm-pairs.ll    |  38 ++++
 13 files changed, 351 insertions(+), 111 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll

diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index eaaba7642bd7b2..07bf002ed73928 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -108,6 +108,14 @@ bool RISCVTargetInfo::validateAsmConstraint(
       return true;
     }
     return false;
+  case 'P':
+    // An even-odd register pair - GPR
+    if (Name[1] == 'r') {
+      Info.setAllowsRegister();
+      Name += 1;
+      return true;
+    }
+    return false;
   case 'v':
     // A vector register.
     if (Name[1] == 'r' || Name[1] == 'd' || Name[1] == 'm') {
@@ -122,8 +130,9 @@ bool RISCVTargetInfo::validateAsmConstraint(
 std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
   std::string R;
   switch (*Constraint) {
-  // c* and v* are two-letter constraints on RISC-V.
+  // c*, P*, and v* are all two-letter constraints on RISC-V.
   case 'c':
+  case 'P':
   case 'v':
     R = std::string("^") + std::string(Constraint, 2);
     Constraint += 1;
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm.c b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
index 75b91d3c497c50..eb6e42f3eb9529 100644
--- a/clang/test/CodeGen/RISCV/riscv-inline-asm.c
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
@@ -33,6 +33,19 @@ void test_cf(float f, double d) {
   asm volatile("" : "=cf"(cd) : "cf"(d));
 }
 
+#if __riscv_xlen == 32
+typedef long long double_xlen_t;
+#elif __riscv_xlen == 64
+typedef __int128_t double_xlen_t;
+#endif
+double_xlen_t test_Pr_wide_scalar(double_xlen_t p) {
+// CHECK-LABEL: define{{.*}} {{i128|i64}} @test_Pr_wide_scalar(
+// CHECK: call {{i128|i64}} asm sideeffect "", "=^Pr,^Pr"({{i128|i64}} %{{.*}})
+  double_xlen_t ret;
+  asm volatile("" : "=Pr"(ret) : "Pr"(p));
+  return ret;
+}
+
 void test_I(void) {
 // CHECK-LABEL: define{{.*}} void @test_I()
 // CHECK: call void asm sideeffect "", "I"(i32 2047)
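For reference, the C-level usage exercised by the test_Pr_wide_scalar test added above looks roughly like the sketch below; it assumes a compiler carrying this patch and an RV32 or RV64 target, and the helper name pair_passthrough is illustrative rather than part of the change. The empty asm template matches the test: the only point is that both operands are constrained to an even/odd GPR pair.

#if __riscv_xlen == 32
typedef long long double_xlen_t;   /* lowered to a pair of 32-bit GPRs (i64) */
#elif __riscv_xlen == 64
typedef __int128_t double_xlen_t;  /* lowered to a pair of 64-bit GPRs (i128) */
#endif

/* Illustrative helper (not part of the patch): route a double-xlen value
   through an even/odd GPR pair via the new "Pr" constraint. */
static inline double_xlen_t pair_passthrough(double_xlen_t p) {
  double_xlen_t ret;
  asm volatile("" : "=Pr"(ret) : "Pr"(p));
  return ret;
}
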
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td
index 493c0cfcab60ce..9c910c0085fce9 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -317,20 +317,23 @@ def riscv_nxv16i8x3  : VTVecTup<384, 3, i8, 220>; // RISCV vector tuple(min_num_
 def riscv_nxv16i8x4  : VTVecTup<512, 4, i8, 221>; // RISCV vector tuple(min_num_elts=16, nf=4)
 def riscv_nxv32i8x2  : VTVecTup<512, 2, i8, 222>; // RISCV vector tuple(min_num_elts=32, nf=2)
 
-def x86mmx    : ValueType<64,   223>;  // X86 MMX value
-def Glue      : ValueType<0,    224>;  // Pre-RA sched glue
-def isVoid    : ValueType<0,    225>;  // Produces no value
-def untyped   : ValueType<8,    226> { // Produces an untyped value
+def riscv_i32_pair : ValueType<64, 223>; // RISCV pair of RV32 GPRs
+def riscv_i64_pair : ValueType<128, 224>; // RISCV pair of RV64 GPRs
+
+def x86mmx    : ValueType<64,   225>;  // X86 MMX value
+def Glue      : ValueType<0,    226>;  // Pre-RA sched glue
+def isVoid    : ValueType<0,    227>;  // Produces no value
+def untyped   : ValueType<8,    228> { // Produces an untyped value
   let LLVMName = "Untyped";
 }
-def funcref   : ValueType<0,    227>;  // WebAssembly's funcref type
-def externref : ValueType<0,    228>;  // WebAssembly's externref type
-def exnref    : ValueType<0,    229>;  // WebAssembly's exnref type
-def x86amx    : ValueType<8192, 230>;  // X86 AMX value
-def i64x8     : ValueType<512,  231>;  // 8 Consecutive GPRs (AArch64)
+def funcref   : ValueType<0,    229>;  // WebAssembly's funcref type
+def externref : ValueType<0,    230>;  // WebAssembly's externref type
+def exnref    : ValueType<0,    231>;  // WebAssembly's exnref type
+def x86amx    : ValueType<8192, 232>;  // X86 AMX value
+def i64x8     : ValueType<512,  233>;  // 8 Consecutive GPRs (AArch64)
 def aarch64svcount
-              : ValueType<16,  232>;  // AArch64 predicate-as-counter
-def spirvbuiltin : ValueType<0, 233>; // SPIR-V's builtin type
+              : ValueType<16,  234>;  // AArch64 predicate-as-counter
+def spirvbuiltin : ValueType<0, 235>; // SPIR-V's builtin type
 
 let isNormalValueType = false in {
 def token      : ValueType<0, 504>;  // TokenTy
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index e3c746b274dde1..7ce7102fe98a5f 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -177,6 +177,10 @@ std::string EVT::getEVTString() const {
     if (isFloatingPoint())
       return "f" + utostr(getSizeInBits());
     llvm_unreachable("Invalid EVT!");
+  case MVT::riscv_i32_pair:
+    return "riscv_i32_pair";
+  case MVT::riscv_i64_pair:
+    return "riscv_i64_pair";
   case MVT::bf16:      return "bf16";
   case MVT::ppcf128:   return "ppcf128";
   case MVT::isVoid:    return "isVoid";
@@ -214,6 +218,8 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
     assert(isExtended() && "Type is not extended!");
     return LLVMTy;
   case MVT::isVoid:  return Type::getVoidTy(Context);
+  case MVT::riscv_i32_pair: return IntegerType::get(Context, 64);
+  case MVT::riscv_i64_pair: return IntegerType::get(Context, 128);
   case MVT::x86mmx:  return llvm::FixedVectorType::get(llvm::IntegerType::get(Context, 64), 1);
   case MVT::aarch64svcount:
     return TargetExtType::get(Context, "aarch64.svcount");
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 0bc35846627c0f..e916dc56556966 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -481,6 +481,12 @@ struct RISCVOperand final : public MCParsedAsmOperand {
            RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(Reg.RegNum);
   }
 
+  bool isGPRPair() const {
+    return Kind == KindTy::Register &&
+           RISCVMCRegisterClasses[RISCV::GPRPairRegClassID].contains(
+               Reg.RegNum);
+  }
+
   bool isGPRF16() const {
     return Kind == KindTy::Register &&
            RISCVMCRegisterClasses[RISCV::GPRF16RegClassID].contains(Reg.RegNum);
@@ -491,17 +497,17 @@ struct RISCVOperand final : public MCParsedAsmOperand {
            RISCVMCRegisterClasses[RISCV::GPRF32RegClassID].contains(Reg.RegNum);
   }
 
-  bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
-  bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
-  bool isGPRAsFPR32() const { return isGPRF32() && Reg.IsGPRAsFPR; }
-  bool isGPRPairAsFPR() const { return isGPRPair() && Reg.IsGPRAsFPR; }
-
-  bool isGPRPair() const {
+  bool isGPRF64Pair() const {
     return Kind == KindTy::Register &&
-           RISCVMCRegisterClasses[RISCV::GPRPairRegClassID].contains(
+           RISCVMCRegisterClasses[RISCV::GPRF64PairRegClassID].contains(
                Reg.RegNum);
   }
 
+  bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
+  bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
+  bool isGPRAsFPR32() const { return isGPRF32() && Reg.IsGPRAsFPR; }
+  bool isGPRPairAsFPR64() const { return isGPRF64Pair() && Reg.IsGPRAsFPR; }
+
   static bool evaluateConstantImm(const MCExpr *Expr, int64_t &Imm,
                                   RISCVMCExpr::VariantKind &VK) {
     if (auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
@@ -2383,7 +2389,7 @@ ParseStatus RISCVAsmParser::parseGPRPairAsFPR64(OperandVector &Operands) {
   const MCRegisterInfo *RI = getContext().getRegisterInfo();
   MCRegister Pair = RI->getMatchingSuperReg(
       Reg, RISCV::sub_gpr_even,
-      &RISCVMCRegisterClasses[RISCV::GPRPairRegClassID]);
+      &RISCVMCRegisterClasses[RISCV::GPRF64PairRegClassID]);
   Operands.push_back(RISCVOperand::createReg(Pair, S, E, /*isGPRAsFPR=*/true));
   return ParseStatus::Success;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index dc3f8254cb4e00..1abb693eb47665 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -953,6 +953,35 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     ReplaceNode(Node, Res);
     return;
   }
+  case RISCVISD::BuildGPRPair: {
+    SDValue Ops[] = {
+        CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
+        Node->getOperand(0),
+        CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32),
+        Node->getOperand(1),
+        CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)};
+
+    SDNode *N = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
+                                       Subtarget->getXLenPairVT(), Ops);
+    ReplaceNode(Node, N);
+    return;
+  }
+  case RISCVISD::SplitGPRPair: {
+    if (!SDValue(Node, 0).use_empty()) {
+      SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_even, DL, VT,
+                                                  Node->getOperand(0));
+      ReplaceUses(SDValue(Node, 0), Lo);
+    }
+
+    if (!SDValue(Node, 1).use_empty()) {
+      SDValue Hi = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_odd, DL, VT,
+                                                  Node->getOperand(0));
+      ReplaceUses(SDValue(Node, 1), Hi);
+    }
+
+    CurDAG->RemoveDeadNode(Node);
+    return;
+  }
   case RISCVISD::BuildPairF64: {
     if (!Subtarget->hasStdExtZdinx())
       break;
@@ -960,7 +989,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     assert(!Subtarget->is64Bit() && "Unexpected subtarget");
 
     SDValue Ops[] = {
-        CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
+        CurDAG->getTargetConstant(RISCV::GPRF64PairRegClassID, DL, MVT::i32),
         Node->getOperand(0),
         CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32),
         Node->getOperand(1),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 677910ec0b5591..25cc770670b3cf 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -114,9 +114,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   }
 
   MVT XLenVT = Subtarget.getXLenVT();
+  MVT XLenPairVT = Subtarget.getXLenPairVT();
 
   // Set up the register classes.
   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
+  addRegisterClass(XLenPairVT, &RISCV::GPRPairRegClass);
 
   if (Subtarget.hasStdExtZfhmin())
     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
@@ -134,7 +136,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     if (Subtarget.is64Bit())
       addRegisterClass(MVT::f64, &RISCV::GPRRegClass);
     else
-      addRegisterClass(MVT::f64, &RISCV::GPRPairRegClass);
+      addRegisterClass(MVT::f64, &RISCV::GPRF64PairRegClass);
   }
 
   static const MVT::SimpleValueType BoolVecVTs[] = {
@@ -296,6 +298,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setCondCodeAction(ISD::SETLE, XLenVT, Expand);
   }
 
+  if (Subtarget.isRV64())
+    setOperationAction(ISD::BITCAST, MVT::i128, Custom);
+  else if (Subtarget.isRV32())
+    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
+
   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
 
   setOperationAction(ISD::VASTART, MVT::Other, Custom);
@@ -2216,6 +2223,17 @@ bool RISCVTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
   return Index == 0 || Index == ResElts;
 }
 
+EVT RISCVTargetLowering::getAsmOperandValueType(const DataLayout &DL, Type *Ty,
+                                                bool AllowUnknown) const {
+  if (Subtarget.isRV32() && Ty->isIntegerTy(64))
+    return MVT::riscv_i32_pair;
+
+  if (Subtarget.isRV64() && Ty->isIntegerTy(128))
+    return MVT::riscv_i64_pair;
+
+  return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
+}
+
 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
@@ -6405,6 +6423,13 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
       std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, MVT::i32, MVT::i32);
       return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
     }
+    if (VT == Subtarget.getXLenPairVT() && Op0VT.isScalarInteger() &&
+        Op0VT.getSizeInBits() == 2 * Subtarget.getXLen()) {
+      SDValue Lo, Hi;
+      std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, XLenVT, XLenVT);
+      return DAG.getNode(RISCVISD::BuildGPRPair, DL, Subtarget.getXLenPairVT(),
+                         Lo, Hi);
+    }
 
     // Consider other scalar<->scalar casts as legal if the types are legal.
     // Otherwise expand them.
@@ -12846,6 +12871,14 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
                                    NewReg.getValue(0), NewReg.getValue(1));
       Results.push_back(RetReg);
+    } else if (VT.isInteger() &&
+               VT.getSizeInBits() == 2 * Subtarget.getXLen() &&
+               Op0VT == Subtarget.getXLenPairVT()) {
+      SDValue NewReg = DAG.getNode(RISCVISD::SplitGPRPair, DL,
+                                   DAG.getVTList(XLenVT, XLenVT), Op0);
+      SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, VT, NewReg.getValue(0),
+                                   NewReg.getValue(1));
+      Results.push_back(RetReg);
     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
                isTypeLegal(Op0VT)) {
       // Custom-legalize bitcasts from fixed-length vector types to illegal
@@ -20090,6 +20123,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(TAIL)
   NODE_NAME_CASE(SELECT_CC)
   NODE_NAME_CASE(BR_CC)
+  NODE_NAME_CASE(BuildGPRPair)
+  NODE_NAME_CASE(SplitGPRPair)
   NODE_NAME_CASE(BuildPairF64)
   NODE_NAME_CASE(SplitF64)
   NODE_NAME_CASE(ADD_LO)
@@ -20368,6 +20403,8 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
       return C_RegisterClass;
     if (Constraint == "cr" || Constraint == "cf")
       return C_RegisterClass;
+    if (Constraint == "Pr")
+      return C_RegisterClass;
   }
   return TargetLowering::getConstraintType(Constraint);
 }
@@ -20389,7 +20426,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
         return std::make_pair(0U, &RISCV::GPRF32NoX0RegClass);
       if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-        return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
+        return std::make_pair(0U, &RISCV::GPRF64PairNoX0RegClass);
       return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
     case 'f':
       if (VT == MVT::f16) {
@@ -20406,7 +20443,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
         if (Subtarget.hasStdExtD())
           return std::make_pair(0U, &RISCV::FPR64RegClass);
         if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-          return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
+          return std::make_pair(0U, &RISCV::GPRF64PairNoX0RegClass);
         if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
           return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
       }
@@ -20448,7 +20485,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
     if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
       return std::make_pair(0U, &RISCV::GPRF32CRegClass);
     if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-      return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+      return std::make_pair(0U, &RISCV::GPRF64PairCRegClass);
     if (!VT.isVector())
       return std::make_pair(0U, &RISCV::GPRCRegClass);
   } else if (Constraint == "cf") {
@@ -20466,10 +20503,12 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (Subtarget.hasStdExtD())
         return std::make_pair(0U, &RISCV::FPR64CRegClass);
       if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-        return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+        return std::make_pair(0U, &RISCV::GPRF64PairCRegClass);
       if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
         return std::make_pair(0U, &RISCV::GPRCRegClass);
     }
+  } else if (Constraint == "Pr") {
+    return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
   }
 
   // Clang will correctly decode the usage of register name aliases into their
@@ -20630,7 +20669,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
   // Subtarget into account.
   if (Res.second == &RISCV::GPRF16RegClass ||
       Res.second == &RISCV::GPRF32RegClass ||
-      Res.second == &RISCV::GPRPairRegClass)
+      Res.second == &RISCV::GPRF64PairRegClass)
     return std::make_pair(Res.first, &RISCV::GPRRegClass);
 
   return Res;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 4e116cd8b5b87a..d6d80a56e412ca 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -35,6 +35,7 @@ enum NodeType : unsigned {
   MRET_GLUE,
   CALL,
   TAIL,
+
   /// Select with condition operator - This selects between a true value and
   /// a false value (ops #3 and #4) based on the boolean result of comparing
   /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
@@ -44,6 +45,18 @@ enum NodeType : unsigned {
   SELECT_CC,
   BR_CC,
 
+  /// Turns a pair of `i<xlen>`s into a `riscv_i<xlen>_pair`.
+  /// - Output: `riscv_i<xlen>_pair`
+  /// - Input 0: `i<xlen>` low-order bits, for even register.
+  /// - Input 1: `i<xlen>` high-order bits, for odd register.
+  BuildGPRPair,
+
+  /// Turns a `riscv_i<xlen>_pair` into a pair of `i<xlen>`s.
+  /// - Output 0: `i<xlen>` low-order bits, from even register.
+  /// - Output 1: `i<xlen>` high-order bits, from odd register.
+  /// - Input: `riscv_i<xlen>_pair`
+  SplitGPRPair,
+
   /// Turns a pair of `i32`s into an `f64`. Needed for rv32d/ilp32.
   /// - Output: `f64`.
   /// - Input 0: low-order bits (31-0) (as `i32`), for even register.
@@ -544,6 +557,9 @@ class RISCVTargetLowering : public TargetLowering {
 
   bool softPromoteHalfType() const override { return true; }
 
+  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
+                             bool AllowUnknown = false) const override;
+
   /// Return the register type for a given MVT, ensuring vectors are treated
   /// as a series of gpr sized integers.
   MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index 5c8977142ad1b4..34f86534ce9fd3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -36,7 +36,7 @@ def AddrRegImmINX : ComplexPattern<iPTR, 2, "SelectAddrRegImmRV32Zdinx">;
 def GPRPairAsFPR : AsmOperandClass {
   let Name = "GPRPairAsFPR";
   let ParserMethod = "parseGPRPairAsFPR64";
-  let PredicateMethod = "isGPRPairAsFPR";
+  let PredicateMethod = "isGPRPairAsFPR64";
   let RenderMethod = "addRegOperands";
 }
 
@@ -52,7 +52,7 @@ def FPR64INX : RegisterOperand<GPR> {
   let DecoderMethod = "DecodeGPRRegisterClass";
 }
 
-def FPR64IN32X : RegisterOperand<GPRPair> {
+def FPR64IN32X : RegisterOperand<GPRF64Pair> {
   let ParserMatchClass = GPRPairAsFPR;
 }
 
@@ -523,15 +523,15 @@ def PseudoFROUND_D_IN32X : PseudoFROUND<FPR64IN32X, f64>;
 
 /// Loads
 let isCall = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in
-def PseudoRV32ZdinxLD : Pseudo<(outs GPRPair:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
+def PseudoRV32ZdinxLD : Pseudo<(outs GPRF64Pair:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
 def : Pat<(f64 (load (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12))),
           (PseudoRV32ZdinxLD GPR:$rs1, simm12:$imm12)>;
 
 /// Stores
 let isCall = 0, mayLoad = 0, mayStore = 1, Size = 8, isCodeGenOnly = 1 in
-def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPair:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
-def : Pat<(store (f64 GPRPair:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
-          (PseudoRV32ZdinxSD GPRPair:$rs2, GPR:$rs1, simm12:$imm12)>;
+def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRF64Pair:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
+def : Pat<(store (f64 GPRF64Pair:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
+          (PseudoRV32ZdinxSD GPRF64Pair:$rs2, GPR:$rs1, simm12:$imm12)>;
 } // Predicates = [HasStdExtZdinx, IsRV32]
 
 let Predicates = [HasStdExtD] in {
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 685f04213afa86..0acca502f10c18 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -74,7 +74,10 @@ def sub_gpr_odd  : SubRegIndex<32, 32> {
 }
 } // Namespace = "RISCV"
 
-// Integer registers
+//===----------------------------------------------------------------------===//
+//  General Purpose Registers (aka Integer Registers)
+//===----------------------------------------------------------------------===//
+
 // CostPerUse is set higher for registers that may not be compressible as they
 // are not part of GPRC, the most restrictive register class used by the
 // compressed instruction set. This will influence the greedy register
@@ -210,6 +213,7 @@ def XLenFVT : ValueTypeByHwMode<[RV64],
                                 [f64]>;
 def XLenPairFVT : ValueTypeByHwMode<[RV32],
                                     [f64]>;
+
 def XLenRI : RegInfoByHwMode<
       [RV32,              RV64],
       [RegInfo<32,32,32>, RegInfo<64,64,64>]>;
@@ -279,7 +283,67 @@ def SR07 : GPRRegisterClass<(add (sequence "X%u", 8, 9),
 
 def GPRX1X5 :  GPRRegisterClass<(add X1, X5)>;
 
-// Floating point registers
+//===----------------------------------------------------------------------===//
+//  Even-Odd GPR Pairs
+//===----------------------------------------------------------------------===//
+
+def XLenPairVT : ValueTypeByHwMode<
+      [RV32,           RV64],
+      [riscv_i32_pair, riscv_i64_pair]>;
+
+def XLenPairRI : RegInfoByHwMode<
+      [RV32,                RV64],
+      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
+
+// Dummy zero register for use in the register pair containing X0 (as X1 is
+// not read or written when the X0 register pair is used).
+def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
+
+// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
+// register's existence from changing codegen (due to the regPressureSetLimit
+// for the GPR register class being altered).
+def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;
+
+let RegAltNameIndices = [ABIRegAltName] in {
+  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
+                                    [X0, DUMMY_REG_PAIR_WITH_X0],
+                                    X0.AltNames> {
+    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
+    let CoveredBySubRegs = 1;
+  }
+  foreach I = 1-15 in {
+    defvar Index = !shl(I, 1);
+    defvar IndexP1 = !add(Index, 1);
+    defvar Reg = !cast<Register>("X"#Index);
+    defvar RegP1 = !cast<Register>("X"#IndexP1);
+    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
+                                                          Reg.AsmName,
+                                                          [Reg, RegP1],
+                                                          Reg.AltNames> {
+      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
+      let CoveredBySubRegs = 1;
+    }
+  }
+}
+
+let RegInfos = XLenPairRI,
+    DecoderMethod = "DecodeGPRPairRegisterClass" in {
+def GPRPair : RISCVRegisterClass<[XLenPairVT], 64, (add
+    X10_X11, X12_X13, X14_X15, X16_X17,
+    X6_X7,
+    X28_X29, X30_X31,
+    X8_X9,
+    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
+    X0_Pair, X2_X3, X4_X5
+)>;
+
+def GPRPairNoX0 : RISCVRegisterClass<[XLenPairVT], 64, (sub GPRPair, X0_Pair)>;
+} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
+
+//===----------------------------------------------------------------------===//
+//  Floating Point Registers
+//===----------------------------------------------------------------------===//
+
 let RegAltNameIndices = [ABIRegAltName] in {
   def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
   def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
@@ -373,8 +437,51 @@ def FPR64C : RISCVRegisterClass<[f64], 64, (add
   (sequence "F%u_D", 8, 9)
 )>;
 
+//===----------------------------------------------------------------------===//
+// GPR Classes for "H/F/D in X"
+//===----------------------------------------------------------------------===//
+
+// 16-bit GPR sub-register class used by Zhinx instructions.
+def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
+                                                (sequence "X%u_H", 5, 7),
+                                                (sequence "X%u_H", 28, 31),
+                                                (sequence "X%u_H", 8, 9),
+                                                (sequence "X%u_H", 18, 27),
+                                                (sequence "X%u_H", 0, 4))>;
+def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
+                                                 (sequence "X%u_H", 8, 9))>;
+def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;
+
+def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
+                                                (sequence "X%u_W", 5, 7),
+                                                (sequence "X%u_W", 28, 31),
+                                                (sequence "X%u_W", 8, 9),
+                                                (sequence "X%u_W", 18, 27),
+                                                (sequence "X%u_W", 0, 4))>;
+def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
+                                                 (sequence "X%u_W", 8, 9))>;
+def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
+
+let DecoderMethod = "DecodeGPRPairRegisterClass" in
+def GPRF64Pair : RISCVRegisterClass<[XLenPairFVT], 64, (add
+    X10_X11, X12_X13, X14_X15, X16_X17,
+    X6_X7,
+    X28_X29, X30_X31,
+    X8_X9,
+    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
+    X0_Pair, X2_X3, X4_X5
+)>;
+
+def GPRF64PairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
+  X10_X11, X12_X13, X14_X15, X8_X9
+)>;
+
+def GPRF64PairNoX0 : RISCVRegisterClass<[XLenPairFVT], 64, (sub GPRF64Pair, X0_Pair)>;
+
+//===----------------------------------------------------------------------===//
 // Vector type mapping to LLVM types.
-//
+//===----------------------------------------------------------------------===//
+
 // The V vector extension requires that VLEN >= 128 and <= 65536.
 // Additionally, the only supported ELEN values are 32 and 64,
 // thus `vscale` can be defined as VLEN/64,
@@ -534,7 +641,10 @@ class VRegList<list<dag> LIn, int start, int nf, int lmul, bit isV0> {
         !foreach(i, IndexSet<start, nf, lmul, isV0>.R, "v" # i));
 }
 
+//===----------------------------------------------------------------------===//
 // Vector registers
+//===----------------------------------------------------------------------===//
+
 foreach Index = !range(0, 32, 1) in {
   def V#Index : RISCVReg<Index, "v"#Index>, DwarfRegNum<[!add(Index, 96)]>;
 }
@@ -652,80 +762,6 @@ def VRM8NoV0 : VReg<VM8VTs, (sub VRM8, V0M8), 8>;
 
 def VMV0 : VReg<VMaskVTs, (add V0), 1>;
 
-// 16-bit GPR sub-register class used by Zhinx instructions.
-def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
-                                                (sequence "X%u_H", 5, 7),
-                                                (sequence "X%u_H", 28, 31),
-                                                (sequence "X%u_H", 8, 9),
-                                                (sequence "X%u_H", 18, 27),
-                                                (sequence "X%u_H", 0, 4))>;
-def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
-                                                 (sequence "X%u_H", 8, 9))>;
-def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;
-
-def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
-                                                (sequence "X%u_W", 5, 7),
-                                                (sequence "X%u_W", 28, 31),
-                                                (sequence "X%u_W", 8, 9),
-                                                (sequence "X%u_W", 18, 27),
-                                                (sequence "X%u_W", 0, 4))>;
-def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
-                                                 (sequence "X%u_W", 8, 9))>;
-def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
-
-def XLenPairRI : RegInfoByHwMode<
-      [RV32,                RV64],
-      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
-
-// Dummy zero register for use in the register pair containing X0 (as X1 is
-// not read to or written when the X0 register pair is used).
-def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
-
-// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
-// register's existence from changing codegen (due to the regPressureSetLimit
-// for the GPR register class being altered).
-def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;
-
-let RegAltNameIndices = [ABIRegAltName] in {
-  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
-                                    [X0, DUMMY_REG_PAIR_WITH_X0],
-                                    X0.AltNames> {
-    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
-    let CoveredBySubRegs = 1;
-  }
-  foreach I = 1-15 in {
-    defvar Index = !shl(I, 1);
-    defvar IndexP1 = !add(Index, 1);
-    defvar Reg = !cast<Register>("X"#Index);
-    defvar RegP1 = !cast<Register>("X"#IndexP1);
-    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
-                                                          Reg.AsmName,
-                                                          [Reg, RegP1],
-                                                          Reg.AltNames> {
-      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
-      let CoveredBySubRegs = 1;
-    }
-  }
-}
-
-let RegInfos = XLenPairRI,
-    DecoderMethod = "DecodeGPRPairRegisterClass" in {
-def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
-    X10_X11, X12_X13, X14_X15, X16_X17,
-    X6_X7,
-    X28_X29, X30_X31,
-    X8_X9,
-    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
-    X0_Pair, X2_X3, X4_X5
-)>;
-
-def GPRPairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
-  X10_X11, X12_X13, X14_X15, X8_X9
-)>;
-
-def GPRPairNoX0 : RISCVRegisterClass<[XLenPairFVT], 64, (sub GPRPair, X0_Pair)>;
-} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
-
 // The register class is added for inline assembly for vector mask types.
 def VM : VReg<VMaskVTs, (add VR), 1>;
 
@@ -770,7 +806,10 @@ foreach m = LMULList in {
   }
 }
 
-// Special registers
+//===----------------------------------------------------------------------===//
+// Special Registers
+//===----------------------------------------------------------------------===//
+
 def FFLAGS : RISCVReg<0, "fflags">;
 def FRM    : RISCVReg<0, "frm">;
 
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index bf9ed3f3d71655..abceacffe249f4 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -201,6 +201,10 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
     return Min;
   }
 
+  MVT getXLenPairVT() const {
+    return is64Bit() ? MVT::riscv_i64_pair : MVT::riscv_i32_pair;
+  }
+
   /// If the ElementCount or TypeSize \p X is scalable and VScale (VLEN) is
   /// exactly known, returns \p X converted to a fixed quantity. Otherwise
   /// returns \p X unmodified.
diff --git a/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
new file mode 100644
index 00000000000000..cdfc0a78f15a2a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define i64 @test_Pr_wide_scalar_simple(i64 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_simple:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i64 %0)
+  ret i64 %1
+}
+
+define i32 @test_Pr_wide_scalar_with_ops(i32 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or a0, a2, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = zext i32 %0 to i64
+  %2 = shl i64 %1, 32
+  %3 = or i64 %1, %2
+  %4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i64 %3)
+  %5 = trunc i64 %4 to i32
+  %6 = lshr i64 %4, 32
+  %7 = trunc i64 %6 to i32
+  %8 = or i32 %5, %7
+  ret i32 %8
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
new file mode 100644
index 00000000000000..bbaef784249bcc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define i128 @test_Pr_wide_scalar_simple(i128 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_simple:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i128 %0)
+  ret i128 %1
+}
+
+define i64 @test_Pr_wide_scalar_with_ops(i64 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a2 <- a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or a0, a2, a3
+; CHECK-NEXT:    ret
+entry:
+  %1 = zext i64 %0 to i128
+  %2 = shl i128 %1, 64
+  %3 = or i128 %1, %2
+  %4 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i128 %3)
+  %5 = trunc i128 %4 to i64
+  %6 = lshr i128 %4, 64
+  %7 = trunc i128 %6 to i64
+  %8 = or i64 %5, %7
+  ret i64 %8
+}
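As a C-level companion to the IR tests in these two new files, the RV64-only sketch below mirrors the test_Pr_wide_scalar_with_ops pattern: pack an XLEN-sized value into both halves of a double-XLEN integer, pass it through an even/odd GPR pair, then fold the halves back together. The function name is illustrative, the asm body is a comment exactly as in the tests (so it has no effect beyond forcing the pair allocation), and it is assumed that clang rewrites "=&Pr"/"Pr" into the "=&^Pr"/"^Pr" forms seen in the IR tests.

/* RV64 sketch; assumes __int128 support and a compiler with this patch. */
unsigned long pair_fold(unsigned long x) {
  unsigned __int128 packed = ((unsigned __int128)x << 64) | x;  /* both halves hold x */
  unsigned __int128 out;
  /* Early-clobber even/odd pair for the output, another pair for the input. */
  asm volatile("/* %0 <- %1 */" : "=&Pr"(out) : "Pr"(packed));
  return (unsigned long)out | (unsigned long)(out >> 64);
}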


