[clang] [llvm] [RISCV] Inline Assembly Support for GPR Pairs ('Pr') (PR #112983)

Sam Elliott via cfe-commits cfe-commits at lists.llvm.org
Tue Oct 22 09:59:12 PDT 2024


https://github.com/lenary updated https://github.com/llvm/llvm-project/pull/112983

>From e986cb6ce09a76e58974888dd644aeea95c290d0 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Mon, 21 Oct 2024 06:31:32 -0700
Subject: [PATCH 1/5] [RISCV][clang] Add 'Pr' GPR Pair Constraint

---
 clang/lib/Basic/Targets/RISCV.cpp           | 11 ++++++++++-
 clang/test/CodeGen/RISCV/riscv-inline-asm.c | 13 +++++++++++++
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index eaaba7642bd7b2..07bf002ed73928 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -108,6 +108,14 @@ bool RISCVTargetInfo::validateAsmConstraint(
       return true;
     }
     return false;
+  case 'P':
+    // An even-odd register pair - GPR
+    if (Name[1] == 'r') {
+      Info.setAllowsRegister();
+      Name += 1;
+      return true;
+    }
+    return false;
   case 'v':
     // A vector register.
     if (Name[1] == 'r' || Name[1] == 'd' || Name[1] == 'm') {
@@ -122,8 +130,9 @@ bool RISCVTargetInfo::validateAsmConstraint(
 std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
   std::string R;
   switch (*Constraint) {
-  // c* and v* are two-letter constraints on RISC-V.
+  // c*, P*, and v* are all two-letter constraints on RISC-V.
   case 'c':
+  case 'P':
   case 'v':
     R = std::string("^") + std::string(Constraint, 2);
     Constraint += 1;
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm.c b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
index 75b91d3c497c50..eb6e42f3eb9529 100644
--- a/clang/test/CodeGen/RISCV/riscv-inline-asm.c
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
@@ -33,6 +33,19 @@ void test_cf(float f, double d) {
   asm volatile("" : "=cf"(cd) : "cf"(d));
 }
 
+#if __riscv_xlen == 32
+typedef long long double_xlen_t;
+#elif __riscv_xlen == 64
+typedef __int128_t double_xlen_t;
+#endif
+double_xlen_t test_Pr_wide_scalar(double_xlen_t p) {
+// CHECK-LABEL: define{{.*}} {{i128|i64}} @test_Pr_wide_scalar(
+// CHECK: call {{i128|i64}} asm sideeffect "", "=^Pr,^Pr"({{i128|i64}} %{{.*}})
+  double_xlen_t ret;
+  asm volatile("" : "=Pr"(ret) : "Pr"(p));
+  return ret;
+}
+
 void test_I(void) {
 // CHECK-LABEL: define{{.*}} void @test_I()
 // CHECK: call void asm sideeffect "", "I"(i32 2047)

>From 0550970e73f08d2da63ae983a172f04e0fbb75e6 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Mon, 21 Oct 2024 14:22:02 -0700
Subject: [PATCH 2/5] [RISCV] WIP: Pairs

---
 llvm/include/llvm/CodeGen/ValueTypes.td       |   25 +-
 llvm/lib/CodeGen/ValueTypes.cpp               |    6 +
 .../Target/RISCV/AsmParser/RISCVAsmParser.cpp |   22 +-
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   |   15 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   61 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |   14 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoD.td      |   14 +-
 llvm/lib/Target/RISCV/RISCVRegisterInfo.td    |  195 +-
 llvm/lib/Target/RISCV/RISCVSubtarget.h        |    4 +
 .../CodeGen/RISCV/branch-relaxation-rv32.ll   | 1010 ++++++
 .../CodeGen/RISCV/branch-relaxation-rv64.ll   | 1013 ++++++
 llvm/test/CodeGen/RISCV/branch-relaxation.ll  | 3226 -----------------
 .../CodeGen/RISCV/rv32-inline-asm-pairs.ll    |   31 +
 .../CodeGen/RISCV/rv64-inline-asm-pairs.ll    |   30 +
 14 files changed, 2327 insertions(+), 3339 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/branch-relaxation.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll

diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td
index 493c0cfcab60ce..9c910c0085fce9 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -317,20 +317,23 @@ def riscv_nxv16i8x3  : VTVecTup<384, 3, i8, 220>; // RISCV vector tuple(min_num_
 def riscv_nxv16i8x4  : VTVecTup<512, 4, i8, 221>; // RISCV vector tuple(min_num_elts=16, nf=4)
 def riscv_nxv32i8x2  : VTVecTup<512, 2, i8, 222>; // RISCV vector tuple(min_num_elts=32, nf=2)
 
-def x86mmx    : ValueType<64,   223>;  // X86 MMX value
-def Glue      : ValueType<0,    224>;  // Pre-RA sched glue
-def isVoid    : ValueType<0,    225>;  // Produces no value
-def untyped   : ValueType<8,    226> { // Produces an untyped value
+def riscv_i32_pair : ValueType<64, 223>; // RISCV pair of RV32 GPRs
+def riscv_i64_pair : ValueType<128, 224>; // RISCV pair of RV64 GPRs
+
+def x86mmx    : ValueType<64,   225>;  // X86 MMX value
+def Glue      : ValueType<0,    226>;  // Pre-RA sched glue
+def isVoid    : ValueType<0,    227>;  // Produces no value
+def untyped   : ValueType<8,    228> { // Produces an untyped value
   let LLVMName = "Untyped";
 }
-def funcref   : ValueType<0,    227>;  // WebAssembly's funcref type
-def externref : ValueType<0,    228>;  // WebAssembly's externref type
-def exnref    : ValueType<0,    229>;  // WebAssembly's exnref type
-def x86amx    : ValueType<8192, 230>;  // X86 AMX value
-def i64x8     : ValueType<512,  231>;  // 8 Consecutive GPRs (AArch64)
+def funcref   : ValueType<0,    229>;  // WebAssembly's funcref type
+def externref : ValueType<0,    230>;  // WebAssembly's externref type
+def exnref    : ValueType<0,    231>;  // WebAssembly's exnref type
+def x86amx    : ValueType<8192, 232>;  // X86 AMX value
+def i64x8     : ValueType<512,  233>;  // 8 Consecutive GPRs (AArch64)
 def aarch64svcount
-              : ValueType<16,  232>;  // AArch64 predicate-as-counter
-def spirvbuiltin : ValueType<0, 233>; // SPIR-V's builtin type
+              : ValueType<16,  234>;  // AArch64 predicate-as-counter
+def spirvbuiltin : ValueType<0, 235>; // SPIR-V's builtin type
 
 let isNormalValueType = false in {
 def token      : ValueType<0, 504>;  // TokenTy
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index e3c746b274dde1..7ce7102fe98a5f 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -177,6 +177,10 @@ std::string EVT::getEVTString() const {
     if (isFloatingPoint())
       return "f" + utostr(getSizeInBits());
     llvm_unreachable("Invalid EVT!");
+  case MVT::riscv_i32_pair:
+    return "riscv_i32_pair";
+  case MVT::riscv_i64_pair:
+    return "riscv_i64_pair";
   case MVT::bf16:      return "bf16";
   case MVT::ppcf128:   return "ppcf128";
   case MVT::isVoid:    return "isVoid";
@@ -214,6 +218,8 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
     assert(isExtended() && "Type is not extended!");
     return LLVMTy;
   case MVT::isVoid:  return Type::getVoidTy(Context);
+  case MVT::riscv_i32_pair: return IntegerType::get(Context, 64);
+  case MVT::riscv_i64_pair: return IntegerType::get(Context, 128);
   case MVT::x86mmx:  return llvm::FixedVectorType::get(llvm::IntegerType::get(Context, 64), 1);
   case MVT::aarch64svcount:
     return TargetExtType::get(Context, "aarch64.svcount");
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 0bc35846627c0f..e916dc56556966 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -481,6 +481,12 @@ struct RISCVOperand final : public MCParsedAsmOperand {
            RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(Reg.RegNum);
   }
 
+  bool isGPRPair() const {
+    return Kind == KindTy::Register &&
+           RISCVMCRegisterClasses[RISCV::GPRPairRegClassID].contains(
+               Reg.RegNum);
+  }
+
   bool isGPRF16() const {
     return Kind == KindTy::Register &&
            RISCVMCRegisterClasses[RISCV::GPRF16RegClassID].contains(Reg.RegNum);
@@ -491,17 +497,17 @@ struct RISCVOperand final : public MCParsedAsmOperand {
            RISCVMCRegisterClasses[RISCV::GPRF32RegClassID].contains(Reg.RegNum);
   }
 
-  bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
-  bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
-  bool isGPRAsFPR32() const { return isGPRF32() && Reg.IsGPRAsFPR; }
-  bool isGPRPairAsFPR() const { return isGPRPair() && Reg.IsGPRAsFPR; }
-
-  bool isGPRPair() const {
+  bool isGPRF64Pair() const {
     return Kind == KindTy::Register &&
-           RISCVMCRegisterClasses[RISCV::GPRPairRegClassID].contains(
+           RISCVMCRegisterClasses[RISCV::GPRF64PairRegClassID].contains(
                Reg.RegNum);
   }
 
+  bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
+  bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
+  bool isGPRAsFPR32() const { return isGPRF32() && Reg.IsGPRAsFPR; }
+  bool isGPRPairAsFPR64() const { return isGPRF64Pair() && Reg.IsGPRAsFPR; }
+
   static bool evaluateConstantImm(const MCExpr *Expr, int64_t &Imm,
                                   RISCVMCExpr::VariantKind &VK) {
     if (auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
@@ -2383,7 +2389,7 @@ ParseStatus RISCVAsmParser::parseGPRPairAsFPR64(OperandVector &Operands) {
   const MCRegisterInfo *RI = getContext().getRegisterInfo();
   MCRegister Pair = RI->getMatchingSuperReg(
       Reg, RISCV::sub_gpr_even,
-      &RISCVMCRegisterClasses[RISCV::GPRPairRegClassID]);
+      &RISCVMCRegisterClasses[RISCV::GPRF64PairRegClassID]);
   Operands.push_back(RISCVOperand::createReg(Pair, S, E, /*isGPRAsFPR=*/true));
   return ParseStatus::Success;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index dc3f8254cb4e00..a1c01823784dee 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -953,6 +953,19 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     ReplaceNode(Node, Res);
     return;
   }
+  case RISCVISD::BuildXLenPair: {
+    SDValue Ops[] = {
+        CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
+        Node->getOperand(0),
+        CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32),
+        Node->getOperand(1),
+        CurDAG->getTargetConstant(RISCV::sub_gpr_odd, DL, MVT::i32)};
+
+    SDNode *N = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
+                                       Subtarget->getXLenPairVT(), Ops);
+    ReplaceNode(Node, N);
+    return;
+  }
   case RISCVISD::BuildPairF64: {
     if (!Subtarget->hasStdExtZdinx())
       break;
@@ -960,7 +973,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     assert(!Subtarget->is64Bit() && "Unexpected subtarget");
 
     SDValue Ops[] = {
-        CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
+        CurDAG->getTargetConstant(RISCV::GPRF64PairRegClassID, DL, MVT::i32),
         Node->getOperand(0),
         CurDAG->getTargetConstant(RISCV::sub_gpr_even, DL, MVT::i32),
         Node->getOperand(1),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3588ef46cadce1..551094bcfe6b43 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -114,9 +114,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   }
 
   MVT XLenVT = Subtarget.getXLenVT();
+  MVT XLenPairVT = Subtarget.getXLenPairVT();
 
   // Set up the register classes.
   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
+  addRegisterClass(XLenPairVT, &RISCV::GPRPairRegClass);
 
   if (Subtarget.hasStdExtZfhmin())
     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
@@ -134,7 +136,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     if (Subtarget.is64Bit())
       addRegisterClass(MVT::f64, &RISCV::GPRRegClass);
     else
-      addRegisterClass(MVT::f64, &RISCV::GPRPairRegClass);
+      addRegisterClass(MVT::f64, &RISCV::GPRF64PairRegClass);
   }
 
   static const MVT::SimpleValueType BoolVecVTs[] = {
@@ -2216,6 +2218,17 @@ bool RISCVTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
   return Index == 0 || Index == ResElts;
 }
 
+EVT RISCVTargetLowering::getAsmOperandValueType(const DataLayout &DL, Type *Ty,
+                                                bool AllowUnknown) const {
+  if (Subtarget.isRV32() && Ty->isIntegerTy(64))
+    return MVT::riscv_i32_pair;
+
+  if (Subtarget.isRV64() && Ty->isIntegerTy(128))
+    return MVT::riscv_i64_pair;
+
+  return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
+}
+
 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
@@ -20087,11 +20100,13 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(SRET_GLUE)
   NODE_NAME_CASE(MRET_GLUE)
   NODE_NAME_CASE(CALL)
+  NODE_NAME_CASE(TAIL)
   NODE_NAME_CASE(SELECT_CC)
   NODE_NAME_CASE(BR_CC)
+  NODE_NAME_CASE(BuildXLenPair)
+  NODE_NAME_CASE(SplitXLenPair)
   NODE_NAME_CASE(BuildPairF64)
   NODE_NAME_CASE(SplitF64)
-  NODE_NAME_CASE(TAIL)
   NODE_NAME_CASE(ADD_LO)
   NODE_NAME_CASE(HI)
   NODE_NAME_CASE(LLA)
@@ -20368,6 +20383,8 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
       return C_RegisterClass;
     if (Constraint == "cr" || Constraint == "cf")
       return C_RegisterClass;
+    if (Constraint == "Pr")
+      return C_RegisterClass;
   }
   return TargetLowering::getConstraintType(Constraint);
 }
@@ -20389,7 +20406,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
         return std::make_pair(0U, &RISCV::GPRF32NoX0RegClass);
       if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-        return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
+        return std::make_pair(0U, &RISCV::GPRF64PairNoX0RegClass);
       return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
     case 'f':
       if (VT == MVT::f16) {
@@ -20406,7 +20423,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
         if (Subtarget.hasStdExtD())
           return std::make_pair(0U, &RISCV::FPR64RegClass);
         if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-          return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
+          return std::make_pair(0U, &RISCV::GPRF64PairNoX0RegClass);
         if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
           return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
       }
@@ -20448,7 +20465,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
     if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
       return std::make_pair(0U, &RISCV::GPRF32CRegClass);
     if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-      return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+      return std::make_pair(0U, &RISCV::GPRF64PairCRegClass);
     if (!VT.isVector())
       return std::make_pair(0U, &RISCV::GPRCRegClass);
   } else if (Constraint == "cf") {
@@ -20466,10 +20483,12 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (Subtarget.hasStdExtD())
         return std::make_pair(0U, &RISCV::FPR64CRegClass);
       if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-        return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+        return std::make_pair(0U, &RISCV::GPRF64PairCRegClass);
       if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
         return std::make_pair(0U, &RISCV::GPRCRegClass);
     }
+  } else if (Constraint == "Pr") {
+    return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
   }
 
   // Clang will correctly decode the usage of register name aliases into their
@@ -20630,7 +20649,7 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
   // Subtarget into account.
   if (Res.second == &RISCV::GPRF16RegClass ||
       Res.second == &RISCV::GPRF32RegClass ||
-      Res.second == &RISCV::GPRPairRegClass)
+      Res.second == &RISCV::GPRF64PairRegClass)
     return std::make_pair(Res.first, &RISCV::GPRRegClass);
 
   return Res;
@@ -21269,6 +21288,24 @@ bool RISCVTargetLowering::splitValueIntoRegisterParts(
     return true;
   }
 
+  if (NumParts == 1 && ValueVT == MVT::i128 && PartVT == MVT::riscv_i64_pair) {
+    // Used on inputs *to* inline assembly.
+    SDValue Lo, Hi;
+    std::tie(Lo, Hi) = DAG.SplitScalar(Val, DL, MVT::i64, MVT::i64);
+    Parts[0] = DAG.getNode(RISCVISD::BuildXLenPair, DL, PartVT, Lo, Hi);
+    return true;
+  }
+
+  if (NumParts == 1 && ValueVT == MVT::i64 && PartVT == MVT::riscv_i32_pair) {
+    // Used on inputs *to* inline assembly.
+    SDValue Lo, Hi;
+    std::tie(Lo, Hi) = DAG.SplitScalar(Val, DL, MVT::i32, MVT::i32);
+    Parts[0] = DAG.getNode(RISCVISD::BuildXLenPair, DL, PartVT, Lo, Hi);
+    return true;
+  }
+
+  // || (ValueVT == MVT::i64 && PartVT == MVT::riscv_i32_pair)
+
   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
     LLVMContext &Context = *DAG.getContext();
     EVT ValueEltVT = ValueVT.getVectorElementType();
@@ -21338,6 +21375,16 @@ SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
     return Val;
   }
 
+  // if (/*ValueVT == MVT::riscv_i64_pair &&*/ PartVT == MVT::riscv_i64_pair) {
+  //   // Used on outputs *from* inline assembly.
+  //   SDValue Val = Parts[0];
+  //   SDValue Pair = DAG.getNode(RISCVISD::SplitXLenPair, DL, {MVT::i64,
+  //   MVT::i64}, Val); return DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT,
+  //   Pair.getValue(0), Pair.getValue(1));
+  // }
+
+  // (PartVT == MVT::i64 && ValueVT == MVT::riscv_i32_pair)
+
   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
     LLVMContext &Context = *DAG.getContext();
     SDValue Val = Parts[0];
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index c3749447955330..a134964199118b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -34,6 +34,8 @@ enum NodeType : unsigned {
   SRET_GLUE,
   MRET_GLUE,
   CALL,
+  TAIL,
+
   /// Select with condition operator - This selects between a true value and
   /// a false value (ops #3 and #4) based on the boolean result of comparing
   /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
@@ -42,9 +44,16 @@ enum NodeType : unsigned {
   /// integer or floating point.
   SELECT_CC,
   BR_CC,
+
+  /// Turn a pair of `i<xlen>`s into a `riscv_i<xlen>_pair`.
+  BuildXLenPair,
+  /// Turn a `riscv_i<xlen>_pair` into a pair of `i<xlen>`s.
+  SplitXLenPair,
+
+  /// Turn a pair of `i32`s into an `f64`. Needed for rv32d/ilp32
   BuildPairF64,
+  /// Turn an `f64` into a pair of `i32`s. Needed for rv32d/ilp32
   SplitF64,
-  TAIL,
 
   // Add the Lo 12 bits from an address. Selected to ADDI.
   ADD_LO,
@@ -534,6 +543,9 @@ class RISCVTargetLowering : public TargetLowering {
 
   bool softPromoteHalfType() const override { return true; }
 
+  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
+                             bool AllowUnknown = false) const override;
+
   /// Return the register type for a given MVT, ensuring vectors are treated
   /// as a series of gpr sized integers.
   MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index 5c8977142ad1b4..c3b02e4045c3c1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -36,7 +36,7 @@ def AddrRegImmINX : ComplexPattern<iPTR, 2, "SelectAddrRegImmRV32Zdinx">;
 def GPRPairAsFPR : AsmOperandClass {
   let Name = "GPRPairAsFPR";
   let ParserMethod = "parseGPRPairAsFPR64";
-  let PredicateMethod = "isGPRPairAsFPR";
+  let PredicateMethod = "isGPRPairAsFPR64";
   let RenderMethod = "addRegOperands";
 }
 
@@ -52,7 +52,7 @@ def FPR64INX : RegisterOperand<GPR> {
   let DecoderMethod = "DecodeGPRRegisterClass";
 }
 
-def FPR64IN32X : RegisterOperand<GPRPair> {
+def FPR64IN32X : RegisterOperand<GPRF64Pair> {
   let ParserMatchClass = GPRPairAsFPR;
 }
 
@@ -491,7 +491,7 @@ def : StPat<store, FSD, FPR64, f64>;
 /// Pseudo-instructions needed for the soft-float ABI with RV32D
 
 // Moves two GPRs to an FPR.
-let usesCustomInserter = 1 in
+let usesCustomInserter = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 def BuildPairF64Pseudo
     : Pseudo<(outs FPR64:$dst), (ins GPR:$src1, GPR:$src2),
              [(set FPR64:$dst, (RISCVBuildPairF64 GPR:$src1, GPR:$src2))]>;
@@ -523,15 +523,15 @@ def PseudoFROUND_D_IN32X : PseudoFROUND<FPR64IN32X, f64>;
 
 /// Loads
 let isCall = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in
-def PseudoRV32ZdinxLD : Pseudo<(outs GPRPair:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
+def PseudoRV32ZdinxLD : Pseudo<(outs GPRF64Pair:$dst), (ins GPR:$rs1, simm12:$imm12), []>;
 def : Pat<(f64 (load (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12))),
           (PseudoRV32ZdinxLD GPR:$rs1, simm12:$imm12)>;
 
 /// Stores
 let isCall = 0, mayLoad = 0, mayStore = 1, Size = 8, isCodeGenOnly = 1 in
-def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPair:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
-def : Pat<(store (f64 GPRPair:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
-          (PseudoRV32ZdinxSD GPRPair:$rs2, GPR:$rs1, simm12:$imm12)>;
+def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRF64Pair:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>;
+def : Pat<(store (f64 GPRF64Pair:$rs2), (AddrRegImmINX (XLenVT GPR:$rs1), simm12:$imm12)),
+          (PseudoRV32ZdinxSD GPRF64Pair:$rs2, GPR:$rs1, simm12:$imm12)>;
 } // Predicates = [HasStdExtZdinx, IsRV32]
 
 let Predicates = [HasStdExtD] in {
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 685f04213afa86..0acca502f10c18 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -74,7 +74,10 @@ def sub_gpr_odd  : SubRegIndex<32, 32> {
 }
 } // Namespace = "RISCV"
 
-// Integer registers
+//===----------------------------------------------------------------------===//
+//  General Purpose Registers (aka Integer Registers)
+//===----------------------------------------------------------------------===//
+
 // CostPerUse is set higher for registers that may not be compressible as they
 // are not part of GPRC, the most restrictive register class used by the
 // compressed instruction set. This will influence the greedy register
@@ -210,6 +213,7 @@ def XLenFVT : ValueTypeByHwMode<[RV64],
                                 [f64]>;
 def XLenPairFVT : ValueTypeByHwMode<[RV32],
                                     [f64]>;
+
 def XLenRI : RegInfoByHwMode<
       [RV32,              RV64],
       [RegInfo<32,32,32>, RegInfo<64,64,64>]>;
@@ -279,7 +283,67 @@ def SR07 : GPRRegisterClass<(add (sequence "X%u", 8, 9),
 
 def GPRX1X5 :  GPRRegisterClass<(add X1, X5)>;
 
-// Floating point registers
+//===----------------------------------------------------------------------===//
+//  Even-Odd GPR Pairs
+//===----------------------------------------------------------------------===//
+
+def XLenPairVT : ValueTypeByHwMode<
+      [RV32,           RV64],
+      [riscv_i32_pair, riscv_i64_pair]>;
+
+def XLenPairRI : RegInfoByHwMode<
+      [RV32,                RV64],
+      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
+
+// Dummy zero register for use in the register pair containing X0 (as X1 is
+// not read from or written to when the X0 register pair is used).
+def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
+
+// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
+// register's existence from changing codegen (due to the regPressureSetLimit
+// for the GPR register class being altered).
+def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;
+
+let RegAltNameIndices = [ABIRegAltName] in {
+  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
+                                    [X0, DUMMY_REG_PAIR_WITH_X0],
+                                    X0.AltNames> {
+    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
+    let CoveredBySubRegs = 1;
+  }
+  foreach I = 1-15 in {
+    defvar Index = !shl(I, 1);
+    defvar IndexP1 = !add(Index, 1);
+    defvar Reg = !cast<Register>("X"#Index);
+    defvar RegP1 = !cast<Register>("X"#IndexP1);
+    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
+                                                          Reg.AsmName,
+                                                          [Reg, RegP1],
+                                                          Reg.AltNames> {
+      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
+      let CoveredBySubRegs = 1;
+    }
+  }
+}
+
+let RegInfos = XLenPairRI,
+    DecoderMethod = "DecodeGPRPairRegisterClass" in {
+def GPRPair : RISCVRegisterClass<[XLenPairVT], 64, (add
+    X10_X11, X12_X13, X14_X15, X16_X17,
+    X6_X7,
+    X28_X29, X30_X31,
+    X8_X9,
+    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
+    X0_Pair, X2_X3, X4_X5
+)>;
+
+def GPRPairNoX0 : RISCVRegisterClass<[XLenPairVT], 64, (sub GPRPair, X0_Pair)>;
+} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
+
+//===----------------------------------------------------------------------===//
+//  Floating Point Registers
+//===----------------------------------------------------------------------===//
+
 let RegAltNameIndices = [ABIRegAltName] in {
   def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
   def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
@@ -373,8 +437,51 @@ def FPR64C : RISCVRegisterClass<[f64], 64, (add
   (sequence "F%u_D", 8, 9)
 )>;
 
+//===----------------------------------------------------------------------===//
+// GPR Classes for "H/F/D in X"
+//===----------------------------------------------------------------------===//
+
+// 16-bit GPR sub-register class used by Zhinx instructions.
+def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
+                                                (sequence "X%u_H", 5, 7),
+                                                (sequence "X%u_H", 28, 31),
+                                                (sequence "X%u_H", 8, 9),
+                                                (sequence "X%u_H", 18, 27),
+                                                (sequence "X%u_H", 0, 4))>;
+def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
+                                                 (sequence "X%u_H", 8, 9))>;
+def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;
+
+def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
+                                                (sequence "X%u_W", 5, 7),
+                                                (sequence "X%u_W", 28, 31),
+                                                (sequence "X%u_W", 8, 9),
+                                                (sequence "X%u_W", 18, 27),
+                                                (sequence "X%u_W", 0, 4))>;
+def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
+                                                 (sequence "X%u_W", 8, 9))>;
+def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
+
+let DecoderMethod = "DecodeGPRPairRegisterClass" in
+def GPRF64Pair : RISCVRegisterClass<[XLenPairFVT], 64, (add
+    X10_X11, X12_X13, X14_X15, X16_X17,
+    X6_X7,
+    X28_X29, X30_X31,
+    X8_X9,
+    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
+    X0_Pair, X2_X3, X4_X5
+)>;
+
+def GPRF64PairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
+  X10_X11, X12_X13, X14_X15, X8_X9
+)>;
+
+def GPRF64PairNoX0 : RISCVRegisterClass<[XLenPairFVT], 64, (sub GPRF64Pair, X0_Pair)>;
+
+//===----------------------------------------------------------------------===//
 // Vector type mapping to LLVM types.
-//
+//===----------------------------------------------------------------------===//
+
 // The V vector extension requires that VLEN >= 128 and <= 65536.
 // Additionally, the only supported ELEN values are 32 and 64,
 // thus `vscale` can be defined as VLEN/64,
@@ -534,7 +641,10 @@ class VRegList<list<dag> LIn, int start, int nf, int lmul, bit isV0> {
         !foreach(i, IndexSet<start, nf, lmul, isV0>.R, "v" # i));
 }
 
+//===----------------------------------------------------------------------===//
 // Vector registers
+//===----------------------------------------------------------------------===//
+
 foreach Index = !range(0, 32, 1) in {
   def V#Index : RISCVReg<Index, "v"#Index>, DwarfRegNum<[!add(Index, 96)]>;
 }
@@ -652,80 +762,6 @@ def VRM8NoV0 : VReg<VM8VTs, (sub VRM8, V0M8), 8>;
 
 def VMV0 : VReg<VMaskVTs, (add V0), 1>;
 
-// 16-bit GPR sub-register class used by Zhinx instructions.
-def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
-                                                (sequence "X%u_H", 5, 7),
-                                                (sequence "X%u_H", 28, 31),
-                                                (sequence "X%u_H", 8, 9),
-                                                (sequence "X%u_H", 18, 27),
-                                                (sequence "X%u_H", 0, 4))>;
-def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
-                                                 (sequence "X%u_H", 8, 9))>;
-def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;
-
-def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
-                                                (sequence "X%u_W", 5, 7),
-                                                (sequence "X%u_W", 28, 31),
-                                                (sequence "X%u_W", 8, 9),
-                                                (sequence "X%u_W", 18, 27),
-                                                (sequence "X%u_W", 0, 4))>;
-def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
-                                                 (sequence "X%u_W", 8, 9))>;
-def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
-
-def XLenPairRI : RegInfoByHwMode<
-      [RV32,                RV64],
-      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
-
-// Dummy zero register for use in the register pair containing X0 (as X1 is
-// not read to or written when the X0 register pair is used).
-def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
-
-// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
-// register's existence from changing codegen (due to the regPressureSetLimit
-// for the GPR register class being altered).
-def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;
-
-let RegAltNameIndices = [ABIRegAltName] in {
-  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
-                                    [X0, DUMMY_REG_PAIR_WITH_X0],
-                                    X0.AltNames> {
-    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
-    let CoveredBySubRegs = 1;
-  }
-  foreach I = 1-15 in {
-    defvar Index = !shl(I, 1);
-    defvar IndexP1 = !add(Index, 1);
-    defvar Reg = !cast<Register>("X"#Index);
-    defvar RegP1 = !cast<Register>("X"#IndexP1);
-    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
-                                                          Reg.AsmName,
-                                                          [Reg, RegP1],
-                                                          Reg.AltNames> {
-      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
-      let CoveredBySubRegs = 1;
-    }
-  }
-}
-
-let RegInfos = XLenPairRI,
-    DecoderMethod = "DecodeGPRPairRegisterClass" in {
-def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
-    X10_X11, X12_X13, X14_X15, X16_X17,
-    X6_X7,
-    X28_X29, X30_X31,
-    X8_X9,
-    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
-    X0_Pair, X2_X3, X4_X5
-)>;
-
-def GPRPairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
-  X10_X11, X12_X13, X14_X15, X8_X9
-)>;
-
-def GPRPairNoX0 : RISCVRegisterClass<[XLenPairFVT], 64, (sub GPRPair, X0_Pair)>;
-} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
-
 // The register class is added for inline assembly for vector mask types.
 def VM : VReg<VMaskVTs, (add VR), 1>;
 
@@ -770,7 +806,10 @@ foreach m = LMULList in {
   }
 }
 
-// Special registers
+//===----------------------------------------------------------------------===//
+// Special Registers
+//===----------------------------------------------------------------------===//
+
 def FFLAGS : RISCVReg<0, "fflags">;
 def FRM    : RISCVReg<0, "frm">;
 
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index bf9ed3f3d71655..abceacffe249f4 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -201,6 +201,10 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
     return Min;
   }
 
+  MVT getXLenPairVT() const {
+    return is64Bit() ? MVT::riscv_i64_pair : MVT::riscv_i32_pair;
+  }
+
   /// If the ElementCount or TypeSize \p X is scalable and VScale (VLEN) is
   /// exactly known, returns \p X converted to a fixed quantity. Otherwise
   /// returns \p X unmodified.
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
new file mode 100644
index 00000000000000..69aaa47e7482aa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
@@ -0,0 +1,1010 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -filetype=obj < %s \
+; RUN:   -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs \
+; RUN:   -filetype=obj < %s -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB0_1
+; CHECK-NEXT:    j .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 4096
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB0_2: # %tail
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %tail
+
+iftrue:
+  call void asm sideeffect ".space 4096", ""()
+  br label %tail
+
+tail:
+  ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB1_1
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    jump .LBB1_2, a0
+; CHECK-NEXT:  .LBB1_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %jmp
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB1_3: # %tail
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %jmp
+
+jmp:
+  call void asm sideeffect "", ""()
+  br label %tail
+
+iftrue:
+  call void asm sideeffect "", ""()
+  br label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %tail
+
+tail:
+  ret i32 1
+}
+
+define void @relax_jal_spill_32() {
+; CHECK-LABEL: relax_jal_spill_32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB2_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB2_4, s11
+; CHECK-NEXT:  .LBB2_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB2_2
+; CHECK-NEXT:  .LBB2_4: # %branch_2
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB2_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i32 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_32_adjust_spill_slot() {
+  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+  ; is out the range of 12-bit signed integer, check whether the spill slot is
+  ; adjusted to close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_32_adjust_spill_slot:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -2032
+; CHECK-NEXT:    .cfi_def_cfa_offset 2032
+; CHECK-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 2012(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 2008(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 2004(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 2000(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 1996(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 1992(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 1988(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 1984(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 1980(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    addi s0, sp, 2032
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    lui a0, 2
+; CHECK-NEXT:    addi a0, a0, -2032
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    srli a0, sp, 12
+; CHECK-NEXT:    slli sp, a0, 12
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB3_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB3_4, s11
+; CHECK-NEXT:  .LBB3_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB3_2
+; CHECK-NEXT:  .LBB3_4: # %branch_2
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB3_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    addi sp, s0, -2032
+; CHECK-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 2012(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 2008(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 2004(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 2000(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 1996(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 1992(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 2032
+; CHECK-NEXT:    ret
+  %stack_obj = alloca i32, align 4096
+
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i32 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_32_restore_block_correspondence() {
+; CHECK-LABEL: relax_jal_spill_32_restore_block_correspondence:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -12
+; CHECK-NEXT:    .cfi_offset s2, -16
+; CHECK-NEXT:    .cfi_offset s3, -20
+; CHECK-NEXT:    .cfi_offset s4, -24
+; CHECK-NEXT:    .cfi_offset s5, -28
+; CHECK-NEXT:    .cfi_offset s6, -32
+; CHECK-NEXT:    .cfi_offset s7, -36
+; CHECK-NEXT:    .cfi_offset s8, -40
+; CHECK-NEXT:    .cfi_offset s9, -44
+; CHECK-NEXT:    .cfi_offset s10, -48
+; CHECK-NEXT:    .cfi_offset s11, -52
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    bne t5, t6, .LBB4_2
+; CHECK-NEXT:    j .LBB4_1
+; CHECK-NEXT:  .LBB4_8: # %dest_1
+; CHECK-NEXT:    lw s11, 0(sp)
+; CHECK-NEXT:  .LBB4_1: # %dest_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB4_3
+; CHECK-NEXT:  .LBB4_2: # %cond_2
+; CHECK-NEXT:    bne t3, t4, .LBB4_5
+; CHECK-NEXT:  .LBB4_3: # %dest_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB4_4: # %dest_3
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_5: # %cond_3
+; CHECK-NEXT:    beq t1, t2, .LBB4_4
+; CHECK-NEXT:  # %bb.6: # %space
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  # %bb.7: # %space
+; CHECK-NEXT:    sw s11, 0(sp)
+; CHECK-NEXT:    jump .LBB4_8, s11
+entry:
+  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  br label %cond_1
+
+cond_1:
+  %cmp1 = icmp eq i32 %t5, %t6
+  br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+  %cmp2 = icmp eq i32 %t3, %t4
+  br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+  %cmp3 = icmp eq i32 %t1, %t2
+  br i1 %cmp3, label %dest_3, label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %dest_1
+
+dest_1:
+  call void asm sideeffect "# dest 1", ""()
+  br label %dest_2
+
+dest_2:
+  call void asm sideeffect "# dest 2", ""()
+  br label %dest_3
+
+dest_3:
+  call void asm sideeffect "# dest 3", ""()
+  br label %tail
+
+tail:
+  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
new file mode 100644
index 00000000000000..90ef390ab68873
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
@@ -0,0 +1,1013 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -filetype=obj < %s \
+; RUN:   -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs \
+; RUN:   -filetype=obj < %s -o /dev/null 2>&1
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB0_1
+; CHECK-NEXT:    j .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 4096
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB0_2: # %tail
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %tail
+
+iftrue:
+  call void asm sideeffect ".space 4096", ""()
+  br label %tail
+
+tail:
+  ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    bnez a0, .LBB1_1
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    jump .LBB1_2, a0
+; CHECK-NEXT:  .LBB1_1: # %iftrue
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %jmp
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB1_3: # %tail
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  br i1 %a, label %iftrue, label %jmp
+
+jmp:
+  call void asm sideeffect "", ""()
+  br label %tail
+
+iftrue:
+  call void asm sideeffect "", ""()
+  br label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %tail
+
+tail:
+  ret i32 1
+}
+
+
+define void @relax_jal_spill_64() {
+;
+; CHECK-LABEL: relax_jal_spill_64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -112
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB2_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB2_4, s11
+; CHECK-NEXT:  .LBB2_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB2_2
+; CHECK-NEXT:  .LBB2_4: # %branch_2
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB2_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    ret
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i64 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_64_adjust_spill_slot() {
+;
+  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+  ; is out of the range of a 12-bit signed integer, check whether the spill
+  ; slot is adjusted to be close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_64_adjust_spill_slot:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -2032
+; CHECK-NEXT:    .cfi_def_cfa_offset 2032
+; CHECK-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 1984(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 1976(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 1968(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 1960(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 1952(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 1944(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 1936(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 1928(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    addi s0, sp, 2032
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    lui a0, 2
+; CHECK-NEXT:    addiw a0, a0, -2032
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    srli a0, sp, 12
+; CHECK-NEXT:    slli sp, a0, 12
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beq t5, t6, .LBB3_1
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB3_4, s11
+; CHECK-NEXT:  .LBB3_1: # %branch_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB3_2
+; CHECK-NEXT:  .LBB3_4: # %branch_2
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB3_2: # %branch_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    addi sp, s0, -2032
+; CHECK-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 1984(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 1976(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 1968(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 1960(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 1952(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 2032
+; CHECK-NEXT:    ret
+  %stack_obj = alloca i64, align 4096
+
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  %cmp = icmp eq i64 %t5, %t6
+  br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %branch_2
+
+branch_2:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
+
+define void @relax_jal_spill_64_restore_block_correspondence() {
+;
+; CHECK-LABEL: relax_jal_spill_64_restore_block_correspondence:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -112
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li ra, 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t0, 5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t1, 6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t2, 7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s0, 8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s1, 9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a0, 10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a1, 11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a2, 12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a3, 13
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a4, 14
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a5, 15
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li a7, 17
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s2, 18
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s3, 19
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s4, 20
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s5, 21
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s6, 22
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s7, 23
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s8, 24
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s9, 25
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s10, 26
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li s11, 27
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t3, 28
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t4, 29
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t5, 30
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    li t6, 31
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    bne t5, t6, .LBB4_2
+; CHECK-NEXT:    j .LBB4_1
+; CHECK-NEXT:  .LBB4_8: # %dest_1
+; CHECK-NEXT:    ld s11, 0(sp)
+; CHECK-NEXT:  .LBB4_1: # %dest_1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    j .LBB4_3
+; CHECK-NEXT:  .LBB4_2: # %cond_2
+; CHECK-NEXT:    bne t3, t4, .LBB4_5
+; CHECK-NEXT:  .LBB4_3: # %dest_2
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  .LBB4_4: # %dest_3
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # dest 3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use ra
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a1
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use a7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s7
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use s11
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t3
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t5
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # reg use t6
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_5: # %cond_3
+; CHECK-NEXT:    beq t1, t2, .LBB4_4
+; CHECK-NEXT:  # %bb.6: # %space
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    .zero 1048576
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:  # %bb.7: # %space
+; CHECK-NEXT:    sd s11, 0(sp)
+; CHECK-NEXT:    jump .LBB4_8, s11
+entry:
+  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+  br label %cond_1
+
+cond_1:
+  %cmp1 = icmp eq i64 %t5, %t6
+  br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+  %cmp2 = icmp eq i64 %t3, %t4
+  br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+  %cmp3 = icmp eq i64 %t1, %t2
+  br i1 %cmp3, label %dest_3, label %space
+
+space:
+  call void asm sideeffect ".space 1048576", ""()
+  br label %dest_1
+
+dest_1:
+  call void asm sideeffect "# dest 1", ""()
+  br label %dest_2
+
+dest_2:
+  call void asm sideeffect "# dest 2", ""()
+  br label %dest_3
+
+dest_3:
+  call void asm sideeffect "# dest 3", ""()
+  br label %tail
+
+tail:
+  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
deleted file mode 100644
index ec77d54da116d3..00000000000000
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ /dev/null
@@ -1,3226 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -filetype=obj < %s \
-; RUN:   -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs \
-; RUN:   -filetype=obj < %s -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
-; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs -filetype=obj < %s \
-; RUN:   -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs \
-; RUN:   -filetype=obj < %s -o /dev/null 2>&1
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-
-define void @relax_bcc(i1 %a) nounwind {
-; CHECK-LABEL: relax_bcc:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 1
-; CHECK-NEXT:    bnez a0, .LBB0_1
-; CHECK-NEXT:    j .LBB0_2
-; CHECK-NEXT:  .LBB0_1: # %iftrue
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    .zero 4096
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:  .LBB0_2: # %tail
-; CHECK-NEXT:    ret
-  br i1 %a, label %iftrue, label %tail
-
-iftrue:
-  call void asm sideeffect ".space 4096", ""()
-  br label %tail
-
-tail:
-  ret void
-}
-
-define i32 @relax_jal(i1 %a) nounwind {
-; CHECK-LABEL: relax_jal:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    andi a0, a0, 1
-; CHECK-NEXT:    bnez a0, .LBB1_1
-; CHECK-NEXT:  # %bb.4:
-; CHECK-NEXT:    jump .LBB1_2, a0
-; CHECK-NEXT:  .LBB1_1: # %iftrue
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    .zero 1048576
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    j .LBB1_3
-; CHECK-NEXT:  .LBB1_2: # %jmp
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:  .LBB1_3: # %tail
-; CHECK-NEXT:    li a0, 1
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  br i1 %a, label %iftrue, label %jmp
-
-jmp:
-  call void asm sideeffect "", ""()
-  br label %tail
-
-iftrue:
-  call void asm sideeffect "", ""()
-  br label %space
-
-space:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %tail
-
-tail:
-  ret i32 1
-}
-
-; For functions whose names contain 32, only the CHECK-RV32 lines are
-; meaningful, and for functions whose names contain 64, only the CHECK-RV64
-; lines are meaningful.
-
-define void @relax_jal_spill_32() {
-; CHECK-RV32-LABEL: relax_jal_spill_32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -64
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    beq t5, t6, .LBB2_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    sw s11, 0(sp)
-; CHECK-RV32-NEXT:    jump .LBB2_4, s11
-; CHECK-RV32-NEXT:  .LBB2_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    j .LBB2_2
-; CHECK-RV32-NEXT:  .LBB2_4: # %branch_2
-; CHECK-RV32-NEXT:    lw s11, 0(sp)
-; CHECK-RV32-NEXT:  .LBB2_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 64
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -128
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 128
-; CHECK-RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 48(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 40(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 32(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 24(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t5, t5
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t6, t6
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB2_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    jump .LBB2_2, t5
-; CHECK-RV64-NEXT:  .LBB2_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  .LBB2_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 48(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 128
-; CHECK-RV64-NEXT:    ret
-
-  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i32 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_32_adjust_spill_slot() {
-; CHECK-RV32-LABEL: relax_jal_spill_32_adjust_spill_slot:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 2012(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 2008(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 2004(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 2000(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 1996(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 1992(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 1988(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 1984(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 1980(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    addi s0, sp, 2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV32-NEXT:    lui a0, 2
-; CHECK-RV32-NEXT:    addi a0, a0, -2032
-; CHECK-RV32-NEXT:    sub sp, sp, a0
-; CHECK-RV32-NEXT:    srli a0, sp, 12
-; CHECK-RV32-NEXT:    slli sp, a0, 12
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    beq t5, t6, .LBB3_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    sw s11, 0(sp)
-; CHECK-RV32-NEXT:    jump .LBB3_4, s11
-; CHECK-RV32-NEXT:  .LBB3_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    j .LBB3_2
-; CHECK-RV32-NEXT:  .LBB3_4: # %branch_2
-; CHECK-RV32-NEXT:    lw s11, 0(sp)
-; CHECK-RV32-NEXT:  .LBB3_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    addi sp, s0, -2032
-; CHECK-RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 2012(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 2008(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 2004(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 2000(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 1996(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 1992(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 2032
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_32_adjust_spill_slot:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 1984(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 1976(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 1968(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 1960(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 1952(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 1944(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 1936(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 1928(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    addi s0, sp, 2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV64-NEXT:    lui a0, 2
-; CHECK-RV64-NEXT:    addiw a0, a0, -2032
-; CHECK-RV64-NEXT:    sub sp, sp, a0
-; CHECK-RV64-NEXT:    srli a0, sp, 12
-; CHECK-RV64-NEXT:    slli sp, a0, 12
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t0, 0(sp)
-; CHECK-RV64-NEXT:    lui t0, 1
-; CHECK-RV64-NEXT:    add t0, sp, t0
-; CHECK-RV64-NEXT:    sd t5, -8(t0) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t5, t5
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui t0, 1
-; CHECK-RV64-NEXT:    add t0, sp, t0
-; CHECK-RV64-NEXT:    sd t6, -16(t0) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    ld t0, 0(sp)
-; CHECK-RV64-NEXT:    sext.w t6, t6
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB3_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    jump .LBB3_2, t5
-; CHECK-RV64-NEXT:  .LBB3_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  .LBB3_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui a0, 1
-; CHECK-RV64-NEXT:    add a0, sp, a0
-; CHECK-RV64-NEXT:    ld t5, -8(a0) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    lui a0, 1
-; CHECK-RV64-NEXT:    add a0, sp, a0
-; CHECK-RV64-NEXT:    ld t6, -16(a0) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    addi sp, s0, -2032
-; CHECK-RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 1984(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 1976(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 1968(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 1960(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 1952(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 2032
-; CHECK-RV64-NEXT:    ret
-
-  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
-  ; is out the range of 12-bit signed integer, check whether the spill slot is
-  ; adjusted to close to the stack base register.
-  %stack_obj = alloca i32, align 4096
-
-  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i32 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_64() {
-; CHECK-RV32-LABEL: relax_jal_spill_64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -272
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 272
-; CHECK-RV32-NEXT:    sw ra, 268(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 260(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 256(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 252(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 248(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 244(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 240(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 236(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 232(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 228(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 224(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 220(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t0, 216(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t1, 212(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t1, 208(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t2, 204(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t2, 200(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t3, 196(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s0, 192(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 188(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s1, 184(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 180(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a1, 176(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a1, 172(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a2, 168(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a2, 164(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a3, 160(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a3, 156(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a4, 152(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a4, 148(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a5, 144(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a5, 140(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a6, 136(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a6, 132(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw a7, 128(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a7, 124(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t0, 120(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s2, 116(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 112(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s3, 108(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 104(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s4, 100(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 96(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s5, 92(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 88(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s6, 84(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 80(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s7, 76(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 72(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s8, 68(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 64(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s9, 60(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 56(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s10, 52(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 48(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw s11, 44(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t3, 40(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t4, 36(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw t4, 32(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t5, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    mv a1, t6
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    sw a1, 24(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a1, a1, s0
-; CHECK-RV32-NEXT:    sw t6, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a2, t5, t6
-; CHECK-RV32-NEXT:    or a1, a2, a1
-; CHECK-RV32-NEXT:    beqz a1, .LBB4_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    jump .LBB4_2, a1
-; CHECK-RV32-NEXT:  .LBB4_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  .LBB4_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t0, 216(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t1, 212(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t1, 208(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t2, 204(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t2, 200(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t3, 196(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s0, 192(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 188(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s1, 184(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 180(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a1, 176(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a1, 172(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a2, 168(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a2, 164(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a3, 160(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a3, 156(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a4, 152(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a4, 148(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a5, 144(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a5, 140(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a6, 136(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a6, 132(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw a7, 128(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw a7, 124(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t0, 120(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s2, 116(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 112(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s3, 108(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 104(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s4, 100(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 96(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s5, 92(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 88(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s6, 84(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 80(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s7, 76(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 72(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s8, 68(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 64(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s9, 60(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 56(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s10, 52(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 48(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw s11, 44(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t3, 40(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t4, 36(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t4, 32(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t5, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t5, 16(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw t6, 24(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw t6, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 260(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 256(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 252(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 248(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 244(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 240(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 236(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 232(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 228(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 224(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 220(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 272
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -112
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 112
-; CHECK-RV64-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB4_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    sd s11, 0(sp)
-; CHECK-RV64-NEXT:    jump .LBB4_4, s11
-; CHECK-RV64-NEXT:  .LBB4_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    j .LBB4_2
-; CHECK-RV64-NEXT:  .LBB4_4: # %branch_2
-; CHECK-RV64-NEXT:    ld s11, 0(sp)
-; CHECK-RV64-NEXT:  .LBB4_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 112
-; CHECK-RV64-NEXT:    ret
-
-  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i64 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_64_adjust_spill_slot() {
-; CHECK-RV32-LABEL: relax_jal_spill_64_adjust_spill_slot:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 2020(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 2016(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 2012(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 2008(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 2004(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 2000(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 1996(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 1992(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 1988(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 1984(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 1980(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    addi s0, sp, 2032
-; CHECK-RV32-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV32-NEXT:    lui a0, 2
-; CHECK-RV32-NEXT:    addi a0, a0, -2032
-; CHECK-RV32-NEXT:    sub sp, sp, a0
-; CHECK-RV32-NEXT:    srli a0, sp, 12
-; CHECK-RV32-NEXT:    slli sp, a0, 12
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t0, -4(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t1, -8(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t1, -12(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t2, -16(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t2, -20(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw t3, -24(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s0, -28(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s1, -32(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s1, -36(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    sw s2, -40(a0) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw a1, -44(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a3, 1
-; CHECK-RV32-NEXT:    add a3, sp, a3
-; CHECK-RV32-NEXT:    sw a1, -48(a3) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a2, -52(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a2, -56(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a3, -60(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a3, -64(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a4, -68(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a4, -72(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a5, -76(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a5, -80(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a6, -84(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a6, -88(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a7, -92(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw a7, -96(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t0, -100(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s2, -104(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s3, -108(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s3, -112(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s4, -116(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s4, -120(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s5, -124(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s5, -128(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s6, -132(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s6, -136(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s7, -140(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s7, -144(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s8, -148(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s8, -152(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s9, -156(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s9, -160(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s10, -164(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s10, -168(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s11, -172(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw s11, -176(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t3, -180(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t4, -184(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t4, -188(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    sw t5, -192(a1) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    mv a1, t6
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw s0, -208(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw a1, -196(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a1, a1, s0
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw t6, -200(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    lui a2, 1
-; CHECK-RV32-NEXT:    add a2, sp, a2
-; CHECK-RV32-NEXT:    sw t5, -204(a2) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    xor a2, t5, t6
-; CHECK-RV32-NEXT:    or a1, a2, a1
-; CHECK-RV32-NEXT:    beqz a1, .LBB5_1
-; CHECK-RV32-NEXT:  # %bb.3:
-; CHECK-RV32-NEXT:    jump .LBB5_2, a1
-; CHECK-RV32-NEXT:  .LBB5_1: # %branch_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  .LBB5_2: # %branch_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t0, -4(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t1, -8(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t1, -12(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t2, -16(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t2, -20(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw t3, -24(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s0, -28(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s1, -32(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s1, -36(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw s2, -40(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a1, 1
-; CHECK-RV32-NEXT:    add a1, sp, a1
-; CHECK-RV32-NEXT:    lw a1, -44(a1) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a1, -48(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a2, -52(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a2, -56(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a3, -60(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a3, -64(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a4, -68(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a4, -72(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a5, -76(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a5, -80(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a6, -84(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a6, -88(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a7, -92(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw a7, -96(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t0, -100(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s2, -104(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s3, -108(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s3, -112(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s4, -116(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s4, -120(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s5, -124(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s5, -128(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s6, -132(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s6, -136(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s7, -140(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s7, -144(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s8, -148(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s8, -152(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s9, -156(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s9, -160(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s10, -164(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s10, -168(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s11, -172(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s11, -176(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t3, -180(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t4, -184(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t4, -188(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t5, -192(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t5, -204(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t6, -196(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw s0, -208(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lui a0, 1
-; CHECK-RV32-NEXT:    add a0, sp, a0
-; CHECK-RV32-NEXT:    lw t6, -200(a0) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    addi sp, s0, -2032
-; CHECK-RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 2020(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 2016(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 2012(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 2008(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 2004(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 2000(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 1996(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 1992(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 1988(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 1984(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 1980(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 2032
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: relax_jal_spill_64_adjust_spill_slot:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    addi sp, sp, -2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 1984(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 1976(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 1968(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 1960(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 1952(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 1944(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 1936(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 1928(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    addi s0, sp, 2032
-; CHECK-RV64-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-RV64-NEXT:    lui a0, 2
-; CHECK-RV64-NEXT:    addiw a0, a0, -2032
-; CHECK-RV64-NEXT:    sub sp, sp, a0
-; CHECK-RV64-NEXT:    srli a0, sp, 12
-; CHECK-RV64-NEXT:    slli sp, a0, 12
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    beq t5, t6, .LBB5_1
-; CHECK-RV64-NEXT:  # %bb.3:
-; CHECK-RV64-NEXT:    sd s11, 0(sp)
-; CHECK-RV64-NEXT:    jump .LBB5_4, s11
-; CHECK-RV64-NEXT:  .LBB5_1: # %branch_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    j .LBB5_2
-; CHECK-RV64-NEXT:  .LBB5_4: # %branch_2
-; CHECK-RV64-NEXT:    ld s11, 0(sp)
-; CHECK-RV64-NEXT:  .LBB5_2: # %branch_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    addi sp, s0, -2032
-; CHECK-RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 1984(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 1976(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 1968(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 1960(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 1952(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 1944(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 1936(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 1928(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 2032
-; CHECK-RV64-NEXT:    ret
-
-  ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
-  ; is out the range of 12-bit signed integer, check whether the spill slot is
-  ; adjusted to close to the stack base register.
-  %stack_obj = alloca i64, align 4096
-
-  %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  %cmp = icmp eq i64 %t5, %t6
-  br i1 %cmp, label %branch_1, label %branch_2
-
-branch_1:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %branch_2
-
-branch_2:
-  call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
-
-  ret void
-}
-
-define void @relax_jal_spill_32_restore_block_correspondence() {
-; CHECK-RV32-LABEL: relax_jal_spill_32_restore_block_correspondence:
-; CHECK-RV32:       # %bb.0: # %entry
-; CHECK-RV32-NEXT:    addi sp, sp, -64
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT:    .cfi_offset ra, -4
-; CHECK-RV32-NEXT:    .cfi_offset s0, -8
-; CHECK-RV32-NEXT:    .cfi_offset s1, -12
-; CHECK-RV32-NEXT:    .cfi_offset s2, -16
-; CHECK-RV32-NEXT:    .cfi_offset s3, -20
-; CHECK-RV32-NEXT:    .cfi_offset s4, -24
-; CHECK-RV32-NEXT:    .cfi_offset s5, -28
-; CHECK-RV32-NEXT:    .cfi_offset s6, -32
-; CHECK-RV32-NEXT:    .cfi_offset s7, -36
-; CHECK-RV32-NEXT:    .cfi_offset s8, -40
-; CHECK-RV32-NEXT:    .cfi_offset s9, -44
-; CHECK-RV32-NEXT:    .cfi_offset s10, -48
-; CHECK-RV32-NEXT:    .cfi_offset s11, -52
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li ra, 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t0, 5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t1, 6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t2, 7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s0, 8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s1, 9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a0, 10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a1, 11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a2, 12
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a3, 13
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a4, 14
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a5, 15
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li a7, 17
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s2, 18
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s3, 19
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s4, 20
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s5, 21
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s6, 22
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s7, 23
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s8, 24
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s9, 25
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s10, 26
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li s11, 27
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t3, 28
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t4, 29
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t5, 30
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    li t6, 31
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    bne t5, t6, .LBB6_2
-; CHECK-RV32-NEXT:    j .LBB6_1
-; CHECK-RV32-NEXT:  .LBB6_8: # %dest_1
-; CHECK-RV32-NEXT:    lw s11, 0(sp)
-; CHECK-RV32-NEXT:  .LBB6_1: # %dest_1
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # dest 1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    j .LBB6_3
-; CHECK-RV32-NEXT:  .LBB6_2: # %cond_2
-; CHECK-RV32-NEXT:    bne t3, t4, .LBB6_5
-; CHECK-RV32-NEXT:  .LBB6_3: # %dest_2
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # dest 2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  .LBB6_4: # %dest_3
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # dest 3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use ra
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a0
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a1
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use a7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s2
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s7
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s8
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s9
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s10
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use s11
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t3
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t4
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t5
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    # reg use t6
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT:    addi sp, sp, 64
-; CHECK-RV32-NEXT:    ret
-; CHECK-RV32-NEXT:  .LBB6_5: # %cond_3
-; CHECK-RV32-NEXT:    beq t1, t2, .LBB6_4
-; CHECK-RV32-NEXT:  # %bb.6: # %space
-; CHECK-RV32-NEXT:    #APP
-; CHECK-RV32-NEXT:    .zero 1048576
-; CHECK-RV32-NEXT:    #NO_APP
-; CHECK-RV32-NEXT:  # %bb.7: # %space
-; CHECK-RV32-NEXT:    sw s11, 0(sp)
-; CHECK-RV32-NEXT:    jump .LBB6_8, s11
-;
-; CHECK-RV64-LABEL: relax_jal_spill_32_restore_block_correspondence:
-; CHECK-RV64:       # %bb.0: # %entry
-; CHECK-RV64-NEXT:    addi sp, sp, -128
-; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 128
-; CHECK-RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s8, 48(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s9, 40(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s10, 32(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sd s11, 24(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    .cfi_offset ra, -8
-; CHECK-RV64-NEXT:    .cfi_offset s0, -16
-; CHECK-RV64-NEXT:    .cfi_offset s1, -24
-; CHECK-RV64-NEXT:    .cfi_offset s2, -32
-; CHECK-RV64-NEXT:    .cfi_offset s3, -40
-; CHECK-RV64-NEXT:    .cfi_offset s4, -48
-; CHECK-RV64-NEXT:    .cfi_offset s5, -56
-; CHECK-RV64-NEXT:    .cfi_offset s6, -64
-; CHECK-RV64-NEXT:    .cfi_offset s7, -72
-; CHECK-RV64-NEXT:    .cfi_offset s8, -80
-; CHECK-RV64-NEXT:    .cfi_offset s9, -88
-; CHECK-RV64-NEXT:    .cfi_offset s10, -96
-; CHECK-RV64-NEXT:    .cfi_offset s11, -104
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li ra, 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t0, 5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t1, 6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t2, 7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s0, 8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s1, 9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a0, 10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a1, 11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a2, 12
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a3, 13
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a4, 14
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a5, 15
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li a7, 17
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s2, 18
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s3, 19
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s4, 20
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s5, 21
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s6, 22
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s7, 23
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s8, 24
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s9, 25
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s10, 26
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li s11, 27
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t3, 28
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t4, 29
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t5, 30
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    li t6, 31
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t6, t6
-; CHECK-RV64-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT:    sext.w t5, t5
-; CHECK-RV64-NEXT:    bne t5, t6, .LBB6_2
-; CHECK-RV64-NEXT:  .LBB6_1: # %dest_1
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # dest 1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    j .LBB6_3
-; CHECK-RV64-NEXT:  .LBB6_2: # %cond_2
-; CHECK-RV64-NEXT:    sext.w t5, t4
-; CHECK-RV64-NEXT:    sext.w t6, t3
-; CHECK-RV64-NEXT:    bne t6, t5, .LBB6_5
-; CHECK-RV64-NEXT:  .LBB6_3: # %dest_2
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # dest 2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  .LBB6_4: # %dest_3
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # dest 3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use ra
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a0
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a1
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use a7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s2
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s7
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s8
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s9
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s10
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use s11
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t3
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t4
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t5
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    # reg use t6
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s8, 48(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; CHECK-RV64-NEXT:    addi sp, sp, 128
-; CHECK-RV64-NEXT:    ret
-; CHECK-RV64-NEXT:  .LBB6_5: # %cond_3
-; CHECK-RV64-NEXT:    sext.w t5, t2
-; CHECK-RV64-NEXT:    sext.w t6, t1
-; CHECK-RV64-NEXT:    beq t6, t5, .LBB6_4
-; CHECK-RV64-NEXT:  # %bb.6: # %space
-; CHECK-RV64-NEXT:    #APP
-; CHECK-RV64-NEXT:    .zero 1048576
-; CHECK-RV64-NEXT:    #NO_APP
-; CHECK-RV64-NEXT:  # %bb.7: # %space
-; CHECK-RV64-NEXT:    jump .LBB6_1, t5
-entry:
-  %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
-  %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
-  %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
-  %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
-  %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
-  %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
-  %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
-  %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
-  %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
-  %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
-  %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
-  %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
-  %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
-  %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
-  %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
-  %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
-  %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
-  %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
-  %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
-  %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
-  %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
-  %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
-  %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
-  %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
-  %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
-  %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
-  %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
-  %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
-
-  br label %cond_1
-
-cond_1:
-  %cmp1 = icmp eq i32 %t5, %t6
-  br i1 %cmp1, label %dest_1, label %cond_2
-
-cond_2:
-  %cmp2 = icmp eq i32 %t3, %t4
-  br i1 %cmp2, label %dest_2, label %cond_3
-
-cond_3:
-  %cmp3 = icmp eq i32 %t1, %t2
-  br i1 %cmp3, label %dest_3, label %space
-
-space:
-  call void asm sideeffect ".space 1048576", ""()
-  br label %dest_1
-
-dest_1:
-  call void asm sideeffect "# dest 1", ""()
-  br label %dest_2
-
-dest_2:
-  call void asm sideeffect "# dest 2", ""()
-  br label %dest_3
-
-dest_3:
-  call void asm sideeffect "# dest 3", ""()
-  br label %tail
-
-tail:
-  call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
-  call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
-  call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
-  call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
-  call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
-  call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
-  call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
-  call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
-  call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
-  call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
-  call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
-  call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
-  call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
-  call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
-  call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
-  call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
-  call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
-  call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
-  call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
-  call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
-  call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
-  call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
-  call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
-  call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
-  call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
-  call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
-  call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
-  call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
-
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
new file mode 100644
index 00000000000000..4a85c34dcc378f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @test_Pr_wide_scalar_in(i32 noundef %p) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_in:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    srai a1, a0, 31
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %p2 = sext i32 %p to i64
+  call void asm sideeffect "/* $0 */", "^Pr"(i64 %p2)
+  ret void
+}
+
+; define i32 @test_Pr_wide_scalar_out() nounwind {
+; entry:
+;   %0 = call i64 asm sideeffect "", "=^Pr"()
+;   %1 = trunc i64 %0 to i32
+;   ret i32 %1
+; }
+
+
+; define i64 @test_Pr_wide_scalar(i64 noundef %p) nounwind {
+; entry:
+;   %0 = call i64 asm sideeffect "", "=^Pr,^Pr"(i64 %p)
+;   ret i64 %0
+; }
diff --git a/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
new file mode 100644
index 00000000000000..28600307f01434
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define void @test_Pr_wide_scalar_in(i64 noundef %p) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_in:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    srai a1, a0, 63
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %p2 = sext i64 %p to i128
+  call void asm sideeffect "/* $0 */", "^Pr"(i128 %p2)
+  ret void
+}
+
+; define i64 @test_Pr_wide_scalar_out() nounwind {
+; entry:
+;   %0 = call i128 asm sideeffect "/* $0 */", "=^Pr"()
+;   %1 = trunc i128 %0 to i64
+;   ret i64 %1
+; }
+
+; define i128 @test_Pr_wide_scalar(i128 noundef %p) nounwind {
+; entry:
+;   %0 = call i128 asm sideeffect "", "=^Pr,^Pr"(i128 %p)
+;   ret i128 %0
+; }

>From 75a57397bb645eb41035d33a56381cb65c8bc0bc Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Tue, 22 Oct 2024 06:59:23 -0700
Subject: [PATCH 3/5] [RISCV] Inline Asm Pairs Progress

---
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   | 18 +++++++-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 46 +++++++------------
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |  4 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoD.td      |  2 +-
 .../CodeGen/RISCV/rv32-inline-asm-pairs.ll    | 45 ++++++++++--------
 .../CodeGen/RISCV/rv64-inline-asm-pairs.ll    | 44 ++++++++++--------
 6 files changed, 88 insertions(+), 71 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index a1c01823784dee..ddbc777ac33a5e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -953,7 +953,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     ReplaceNode(Node, Res);
     return;
   }
-  case RISCVISD::BuildXLenPair: {
+  case RISCVISD::BuildPairGPR: {
     SDValue Ops[] = {
         CurDAG->getTargetConstant(RISCV::GPRPairRegClassID, DL, MVT::i32),
         Node->getOperand(0),
@@ -966,6 +966,22 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     ReplaceNode(Node, N);
     return;
   }
+  case RISCVISD::SplitPairGPR: {
+    if (!SDValue(Node, 0).use_empty()) {
+      SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_even, DL, VT,
+                                                  Node->getOperand(0));
+      ReplaceUses(SDValue(Node, 0), Lo);
+    }
+
+    if (!SDValue(Node, 1).use_empty()) {
+      SDValue Hi = CurDAG->getTargetExtractSubreg(RISCV::sub_gpr_odd, DL, VT,
+                                                  Node->getOperand(0));
+      ReplaceUses(SDValue(Node, 1), Hi);
+    }
+
+    CurDAG->RemoveDeadNode(Node);
+    return;
+  }
   case RISCVISD::BuildPairF64: {
     if (!Subtarget->hasStdExtZdinx())
       break;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 551094bcfe6b43..b53f3a1b71e28d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -298,6 +298,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setCondCodeAction(ISD::SETLE, XLenVT, Expand);
   }
 
+  if (Subtarget.isRV64())
+    setOperationAction(ISD::BITCAST, MVT::i128, Custom);
+  else if (Subtarget.isRV32())
+    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
+
   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
 
   setOperationAction(ISD::VASTART, MVT::Other, Custom);
@@ -6418,6 +6423,11 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
       std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, MVT::i32, MVT::i32);
       return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
     }
+    if (VT == Subtarget.getXLenPairVT() && Op0VT.isScalarInteger() && Op0VT.getSizeInBits() == 2 * Subtarget.getXLen()) {
+      SDValue Lo, Hi;
+      std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, XLenVT, XLenVT);
+      return DAG.getNode(RISCVISD::BuildPairGPR, DL, Subtarget.getXLenPairVT(), Lo, Hi);
+    }
 
     // Consider other scalar<->scalar casts as legal if the types are legal.
     // Otherwise expand them.
@@ -12859,6 +12869,10 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
                                    NewReg.getValue(0), NewReg.getValue(1));
       Results.push_back(RetReg);
+    } else if (VT.isInteger() && VT.getSizeInBits() == 2 * Subtarget.getXLen() && Op0VT == Subtarget.getXLenPairVT()) {
+      SDValue NewReg = DAG.getNode(RISCVISD::SplitPairGPR, DL, DAG.getVTList(XLenVT, XLenVT), Op0);
+      SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, VT, NewReg.getValue(0), NewReg.getValue(1));
+      Results.push_back(RetReg);
     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
                isTypeLegal(Op0VT)) {
       // Custom-legalize bitcasts from fixed-length vector types to illegal
@@ -20103,8 +20117,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(TAIL)
   NODE_NAME_CASE(SELECT_CC)
   NODE_NAME_CASE(BR_CC)
-  NODE_NAME_CASE(BuildXLenPair)
-  NODE_NAME_CASE(SplitXLenPair)
+  NODE_NAME_CASE(BuildPairGPR)
+  NODE_NAME_CASE(SplitPairGPR)
   NODE_NAME_CASE(BuildPairF64)
   NODE_NAME_CASE(SplitF64)
   NODE_NAME_CASE(ADD_LO)
@@ -21288,24 +21302,6 @@ bool RISCVTargetLowering::splitValueIntoRegisterParts(
     return true;
   }
 
-  if (NumParts == 1 && ValueVT == MVT::i128 && PartVT == MVT::riscv_i64_pair) {
-    // Used on inputs *to* inline assembly.
-    SDValue Lo, Hi;
-    std::tie(Lo, Hi) = DAG.SplitScalar(Val, DL, MVT::i64, MVT::i64);
-    Parts[0] = DAG.getNode(RISCVISD::BuildXLenPair, DL, PartVT, Lo, Hi);
-    return true;
-  }
-
-  if (NumParts == 1 && ValueVT == MVT::i64 && PartVT == MVT::riscv_i32_pair) {
-    // Used on inputs *to* inline assembly.
-    SDValue Lo, Hi;
-    std::tie(Lo, Hi) = DAG.SplitScalar(Val, DL, MVT::i32, MVT::i32);
-    Parts[0] = DAG.getNode(RISCVISD::BuildXLenPair, DL, PartVT, Lo, Hi);
-    return true;
-  }
-
-  // || (ValueVT == MVT::i64 && PartVT == MVT::riscv_i32_pair)
-
   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
     LLVMContext &Context = *DAG.getContext();
     EVT ValueEltVT = ValueVT.getVectorElementType();
@@ -21375,16 +21371,6 @@ SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
     return Val;
   }
 
-  // if (/*ValueVT == MVT::riscv_i64_pair &&*/ PartVT == MVT::riscv_i64_pair) {
-  //   // Used on outputs *from* inline assembly.
-  //   SDValue Val = Parts[0];
-  //   SDValue Pair = DAG.getNode(RISCVISD::SplitXLenPair, DL, {MVT::i64,
-  //   MVT::i64}, Val); return DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT,
-  //   Pair.getValue(0), Pair.getValue(1));
-  // }
-
-  // (PartVT == MVT::i64 && ValueVT == MVT::riscv_i32_pair)
-
   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
     LLVMContext &Context = *DAG.getContext();
     SDValue Val = Parts[0];
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index a134964199118b..42667a7d5f9111 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -46,9 +46,9 @@ enum NodeType : unsigned {
   BR_CC,
 
   /// Turn a pair of `i<xlen>`s into a `riscv_i<xlen>_pair`.
-  BuildXLenPair,
+  BuildPairGPR,
   /// Turn a `riscv_i<xlen>_pair` into a pair of `i<xlen>`s.
-  SplitXLenPair,
+  SplitPairGPR,
 
   /// Turn a pair of `i32`s into an `f64`. Needed for rv32d/ilp32
   BuildPairF64,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index c3b02e4045c3c1..34f86534ce9fd3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -491,7 +491,7 @@ def : StPat<store, FSD, FPR64, f64>;
 /// Pseudo-instructions needed for the soft-float ABI with RV32D
 
 // Moves two GPRs to an FPR.
-let usesCustomInserter = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+let usesCustomInserter = 1 in
 def BuildPairF64Pseudo
     : Pseudo<(outs FPR64:$dst), (ins GPR:$src1, GPR:$src2),
              [(set FPR64:$dst, (RISCVBuildPairF64 GPR:$src1, GPR:$src2))]>;
diff --git a/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
index 4a85c34dcc378f..69da11bbfd0685 100644
--- a/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
@@ -2,30 +2,37 @@
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s
 
-define void @test_Pr_wide_scalar_in(i32 noundef %p) nounwind {
-; CHECK-LABEL: test_Pr_wide_scalar_in:
+define i64 @test_Pr_wide_scalar_simple(i64 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_simple:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    srai a1, a0, 31
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    # a0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %p2 = sext i32 %p to i64
-  call void asm sideeffect "/* $0 */", "^Pr"(i64 %p2)
-  ret void
+  %1 = call i64 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i64 %0)
+  ret i64 %1
 }
 
-; define i32 @test_Pr_wide_scalar_out() nounwind {
-; entry:
-;   %0 = call i64 asm sideeffect "", "=^Pr"()
-;   %1 = trunc i64 %0 to i32
-;   ret i32 %1
-; }
-
-
-; define i64 @test_Pr_wide_scalar(i64 noundef %p) nounwind {
-; entry:
-;   %0 = call i64 asm sideeffect "", "=^Pr,^Pr"(i64 %p)
-;   ret i64 %0
-; }
+define i32 @test_Pr_wide_scalar_complex(i32 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_complex:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    srai a1, a0, 31
+; CHECK-NEXT:    or a1, a1, a0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    ret
+entry:
+  %1 = sext i32 %0 to i64
+  %2 = zext i32 %0 to i64
+  %3 = shl i64 %2, 32
+  %4 = or i64 %1, %3
+  %5 = call i64 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i64 %4)
+  %6 = trunc i64 %5 to i32
+  %7 = lshr i64 %5, 32
+  %8 = trunc i64 %7 to i32
+  %9 = or i32 %6, %8
+  ret i32 %9
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
index 28600307f01434..01bb55f1ef274f 100644
--- a/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
@@ -2,29 +2,37 @@
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s
 
-define void @test_Pr_wide_scalar_in(i64 noundef %p) nounwind {
-; CHECK-LABEL: test_Pr_wide_scalar_in:
+define i128 @test_Pr_wide_scalar_simple(i128 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_simple:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    srai a1, a0, 63
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    # a0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %p2 = sext i64 %p to i128
-  call void asm sideeffect "/* $0 */", "^Pr"(i128 %p2)
-  ret void
+  %1 = call i128 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i128 %0)
+  ret i128 %1
 }
 
-; define i64 @test_Pr_wide_scalar_out() nounwind {
-; entry:
-;   %0 = call i128 asm sideeffect "/* $0 */", "=^Pr"()
-;   %1 = trunc i128 %0 to i64
-;   ret i64 %1
-; }
-
-; define i128 @test_Pr_wide_scalar(i128 noundef %p) nounwind {
-; entry:
-;   %0 = call i128 asm sideeffect "", "=^Pr,^Pr"(i128 %p)
-;   ret i128 %0
-; }
+define i64 @test_Pr_wide_scalar_complex(i64 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_complex:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    srai a1, a0, 63
+; CHECK-NEXT:    or a1, a1, a0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    # a0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    ret
+entry:
+  %1 = sext i64 %0 to i128
+  %2 = zext i64 %0 to i128
+  %3 = shl i128 %2, 64
+  %4 = or i128 %1, %3
+  %5 = call i128 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i128 %4)
+  %6 = trunc i128 %5 to i64
+  %7 = lshr i128 %5, 64
+  %8 = trunc i128 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}

>From b82c7a7772a200cf0eb2d00aea5c1817f05587c6 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Tue, 22 Oct 2024 07:04:15 -0700
Subject: [PATCH 4/5] fixup! [RISCV] Inline Asm Pairs Progress

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b53f3a1b71e28d..9fdf0cb59e2390 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6423,10 +6423,12 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
       std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, MVT::i32, MVT::i32);
       return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
     }
-    if (VT == Subtarget.getXLenPairVT() && Op0VT.isScalarInteger() && Op0VT.getSizeInBits() == 2 * Subtarget.getXLen()) {
+    if (VT == Subtarget.getXLenPairVT() && Op0VT.isScalarInteger() &&
+        Op0VT.getSizeInBits() == 2 * Subtarget.getXLen()) {
       SDValue Lo, Hi;
       std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, XLenVT, XLenVT);
-      return DAG.getNode(RISCVISD::BuildPairGPR, DL, Subtarget.getXLenPairVT(), Lo, Hi);
+      return DAG.getNode(RISCVISD::BuildPairGPR, DL, Subtarget.getXLenPairVT(),
+                         Lo, Hi);
     }
 
     // Consider other scalar<->scalar casts as legal if the types are legal.
@@ -12869,9 +12871,13 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
                                    NewReg.getValue(0), NewReg.getValue(1));
       Results.push_back(RetReg);
-    } else if (VT.isInteger() && VT.getSizeInBits() == 2 * Subtarget.getXLen() && Op0VT == Subtarget.getXLenPairVT()) {
-      SDValue NewReg = DAG.getNode(RISCVISD::SplitPairGPR, DL, DAG.getVTList(XLenVT, XLenVT), Op0);
-      SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, VT, NewReg.getValue(0), NewReg.getValue(1));
+    } else if (VT.isInteger() &&
+               VT.getSizeInBits() == 2 * Subtarget.getXLen() &&
+               Op0VT == Subtarget.getXLenPairVT()) {
+      SDValue NewReg = DAG.getNode(RISCVISD::SplitPairGPR, DL,
+                                   DAG.getVTList(XLenVT, XLenVT), Op0);
+      SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, VT, NewReg.getValue(0),
+                                   NewReg.getValue(1));
       Results.push_back(RetReg);
     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
                isTypeLegal(Op0VT)) {

>From 040a7c43f31d9552ade63567698afd0d473664e1 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Tue, 22 Oct 2024 09:58:53 -0700
Subject: [PATCH 5/5] fixup! fixup! [RISCV] Inline Asm Pairs Progress

---
 .../CodeGen/RISCV/rv32-inline-asm-pairs.ll    | 36 +++++++++----------
 .../CodeGen/RISCV/rv64-inline-asm-pairs.ll    | 36 +++++++++----------
 2 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
index 69da11bbfd0685..cdfc0a78f15a2a 100644
--- a/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
@@ -6,33 +6,33 @@ define i64 @test_Pr_wide_scalar_simple(i64 noundef %0) nounwind {
 ; CHECK-LABEL: test_Pr_wide_scalar_simple:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    #APP
-; CHECK-NEXT:    # a0
+; CHECK-NEXT:    # a2 <- a0
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
 ; CHECK-NEXT:    ret
 entry:
-  %1 = call i64 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i64 %0)
+  %1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i64 %0)
   ret i64 %1
 }
 
-define i32 @test_Pr_wide_scalar_complex(i32 noundef %0) nounwind {
-; CHECK-LABEL: test_Pr_wide_scalar_complex:
+define i32 @test_Pr_wide_scalar_with_ops(i32 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    srai a1, a0, 31
-; CHECK-NEXT:    or a1, a1, a0
+; CHECK-NEXT:    mv a1, a0
 ; CHECK-NEXT:    #APP
-; CHECK-NEXT:    # a0
+; CHECK-NEXT:    # a2 <- a0
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    or a0, a2, a3
 ; CHECK-NEXT:    ret
 entry:
-  %1 = sext i32 %0 to i64
-  %2 = zext i32 %0 to i64
-  %3 = shl i64 %2, 32
-  %4 = or i64 %1, %3
-  %5 = call i64 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i64 %4)
-  %6 = trunc i64 %5 to i32
-  %7 = lshr i64 %5, 32
-  %8 = trunc i64 %7 to i32
-  %9 = or i32 %6, %8
-  ret i32 %9
+  %1 = zext i32 %0 to i64
+  %2 = shl i64 %1, 32
+  %3 = or i64 %1, %2
+  %4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i64 %3)
+  %5 = trunc i64 %4 to i32
+  %6 = lshr i64 %4, 32
+  %7 = trunc i64 %6 to i32
+  %8 = or i32 %5, %7
+  ret i32 %8
 }
diff --git a/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
index 01bb55f1ef274f..bbaef784249bcc 100644
--- a/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
@@ -6,33 +6,33 @@ define i128 @test_Pr_wide_scalar_simple(i128 noundef %0) nounwind {
 ; CHECK-LABEL: test_Pr_wide_scalar_simple:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    #APP
-; CHECK-NEXT:    # a0
+; CHECK-NEXT:    # a2 <- a0
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:    mv a1, a3
 ; CHECK-NEXT:    ret
 entry:
-  %1 = call i128 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i128 %0)
+  %1 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i128 %0)
   ret i128 %1
 }
 
-define i64 @test_Pr_wide_scalar_complex(i64 noundef %0) nounwind {
-; CHECK-LABEL: test_Pr_wide_scalar_complex:
+define i64 @test_Pr_wide_scalar_with_ops(i64 noundef %0) nounwind {
+; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    srai a1, a0, 63
-; CHECK-NEXT:    or a1, a1, a0
+; CHECK-NEXT:    mv a1, a0
 ; CHECK-NEXT:    #APP
-; CHECK-NEXT:    # a0
+; CHECK-NEXT:    # a2 <- a0
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    or a0, a2, a3
 ; CHECK-NEXT:    ret
 entry:
-  %1 = sext i64 %0 to i128
-  %2 = zext i64 %0 to i128
-  %3 = shl i128 %2, 64
-  %4 = or i128 %1, %3
-  %5 = call i128 asm sideeffect "/* $0 */", "=^Pr,^Pr"(i128 %4)
-  %6 = trunc i128 %5 to i64
-  %7 = lshr i128 %5, 64
-  %8 = trunc i128 %7 to i64
-  %9 = or i64 %6, %8
-  ret i64 %9
+  %1 = zext i64 %0 to i128
+  %2 = shl i128 %1, 64
+  %3 = or i128 %1, %2
+  %4 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^Pr,^Pr"(i128 %3)
+  %5 = trunc i128 %4 to i64
+  %6 = lshr i128 %4, 64
+  %7 = trunc i128 %6 to i64
+  %8 = or i64 %5, %7
+  ret i64 %8
 }



More information about the cfe-commits mailing list