[clang] [llvm] [RISCV][Zicfilp] Enable Zicfilp CFI compiler behaviors by looking at module flags (PR #152121)
Ming-Yi Lai via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 7 19:12:52 PST 2025
https://github.com/mylai-mtk updated https://github.com/llvm/llvm-project/pull/152121
>From e135e6284454b876f95f1406c62dc68f6bd090c6 Mon Sep 17 00:00:00 2001
From: Ming-Yi Lai <ming-yi.lai at mediatek.com>
Date: Tue, 5 Aug 2025 17:31:54 +0800
Subject: [PATCH 1/2] [RISCV][Zicfilp] Codegen LPAD insns by looking at module
flags
Expected Behavior:
Stop deciding LPAD insn codegen by testing whether the target has the Zicfilp
extension; instead, codegen LPAD insns if the LLVM module has all of these flags:
+ `cf-protection-branch`: Needs to be a non-zero integer (which means `true`)
+ `cf-branch-label-scheme`: Needs to be `unlabeled`
Context:
In clang, Zicfilp-based control flow integrity (the `unlabeled` scheme) can now
be enabled by giving the `-fcf-protection=branch
-mcf-branch-label-scheme=unlabeled` options. With these options, the clang
frontend adds the above-mentioned flags to LLVM modules. Here we want to align
LPAD insn codegen to be enabled by the semantics of those LLVM module flags,
instead of relying on the inaccurate indicator of whether the Zicfilp extension
is available, so the toolchain's behavior is more streamlined and expected.
Also, since LPAD insns can be executed regardless of whether Zicfilp is
available in the target (because the LPAD insn is encoded as a standard hint
insn), clang accepts the above-mentioned CLI options even if Zicfilp is not
enabled and expects the backend to still generate LPAD insns. This patch
enables LPAD insn generation in such cases.
---
clang/lib/Driver/ToolChains/Arch/RISCV.cpp | 29 +
clang/test/Driver/riscv-features.c | 19 +
.../RISCV/MCTargetDesc/RISCVBaseInfo.cpp | 4 +
llvm/lib/Target/RISCV/RISCVCallingConv.cpp | 4 +-
llvm/lib/Target/RISCV/RISCVFeatures.td | 13 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 18 +-
.../RISCV/RISCVIndirectBranchTracking.cpp | 10 +-
llvm/lib/Target/RISCV/RISCVInstrInfo.td | 16 +-
.../lib/Target/RISCV/RISCVLandingPadSetup.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVSubtarget.h | 14 +
llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 35 +
.../CodeGen/RISCV/branch-relaxation-rv32.ll | 788 ------------
.../CodeGen/RISCV/branch-relaxation-rv64.ll | 791 ------------
llvm/test/CodeGen/RISCV/calls.ll | 186 ---
llvm/test/CodeGen/RISCV/features-info.ll | 2 +
.../test/CodeGen/RISCV/jumptable-swguarded.ll | 48 +-
llvm/test/CodeGen/RISCV/lpad.ll | 99 +-
.../CodeGen/RISCV/machine-outliner-lpad.ll | 14 +-
llvm/test/CodeGen/RISCV/nest-register.ll | 26 -
.../test/CodeGen/RISCV/rv64-trampoline-cfi.ll | 13 +-
llvm/test/CodeGen/RISCV/tail-calls.ll | 270 -----
.../RISCV/zicfilp-disabled-indirect-branch.ll | 40 +
.../CodeGen/RISCV/zicfilp-indirect-branch.ll | 21 +-
...icfilp-unlabeled-branch-relaxation-rv32.ll | 1058 ++++++++++++++++
...icfilp-unlabeled-branch-relaxation-rv64.ll | 1059 +++++++++++++++++
.../CodeGen/RISCV/zicfilp-unlabeled-calls.ll | 253 ++++
.../RISCV/zicfilp-unlabeled-nest-register.ll | 38 +
.../RISCV/zicfilp-unlabeled-tail-calls.ll | 434 +++++++
.../branch-cfi/Inputs/rv32-foo-disabled.ll | 18 +
.../Inputs/rv32-foo-unknown-scheme.ll | 20 +
.../branch-cfi/Inputs/rv32-foo-unlabeled.ll | 20 +
.../branch-cfi/Inputs/rv64-foo-disabled.ll | 18 +
.../Inputs/rv64-foo-unknown-scheme.ll | 20 +
.../branch-cfi/Inputs/rv64-foo-unlabeled.ll | 20 +
.../LTO/RISCV/branch-cfi/rv32-unlabeled.ll | 43 +
.../LTO/RISCV/branch-cfi/rv64-unlabeled.ll | 43 +
36 files changed, 3304 insertions(+), 2202 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/zicfilp-disabled-indirect-branch.ll
create mode 100644 llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll
create mode 100644 llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll
create mode 100644 llvm/test/CodeGen/RISCV/zicfilp-unlabeled-calls.ll
create mode 100644 llvm/test/CodeGen/RISCV/zicfilp-unlabeled-nest-register.ll
create mode 100644 llvm/test/CodeGen/RISCV/zicfilp-unlabeled-tail-calls.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-disabled.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unknown-scheme.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unlabeled.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-disabled.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unknown-scheme.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unlabeled.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/rv32-unlabeled.ll
create mode 100644 llvm/test/LTO/RISCV/branch-cfi/rv64-unlabeled.ll
diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index 7fda8ea50223d..671ed42f85462 100644
--- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -169,6 +169,35 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
Features.push_back("+unaligned-vector-mem");
}
+ if (const Arg *A = Args.getLastArg(options::OPT_fcf_protection_EQ)) {
+ const StringRef CFProtection{A->getValue()};
+ const bool CFProtectionBranch =
+ CFProtection == "full" || CFProtection == "branch";
+ if (CFProtectionBranch) {
+ bool FuncSig;
+ if (const Arg *SA =
+ Args.getLastArg(options::OPT_mcf_branch_label_scheme_EQ)) {
+ const StringRef Scheme{SA->getValue()};
+ if (Scheme == "unlabeled") {
+ FuncSig = false;
+ } else {
+ assert(Scheme == "func-sig" &&
+ "-mcf-branch-label-scheme should be either `unlabeled` or "
+ "`func-sig`");
+ FuncSig = true;
+ }
+ } else {
+ // `func-sig` is assumed if `-mcf-branch-label-scheme` is not given.
+ FuncSig = true;
+ }
+
+ if (FuncSig)
+ Features.push_back("+zicfilp-func-sig");
+ else
+ Features.push_back("+zicfilp-unlabeled");
+ }
+ }
+
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
handleTargetFeaturesGroup(D, Triple, Args, Features,
diff --git a/clang/test/Driver/riscv-features.c b/clang/test/Driver/riscv-features.c
index 97736ff81c799..1070962982164 100644
--- a/clang/test/Driver/riscv-features.c
+++ b/clang/test/Driver/riscv-features.c
@@ -90,3 +90,22 @@
// RVE: "-target-feature" "+e"
// RVE-SAME: "-target-feature" "-i"
+
+// RUN: %clang --target=riscv32 -fcf-protection=full -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv32 -fcf-protection=full -mcf-branch-label-scheme=func-sig -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv32 -fcf-protection=full -mcf-branch-label-scheme=unlabeled -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-UNLABELED
+// RUN: %clang --target=riscv32 -fcf-protection=branch -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv32 -fcf-protection=branch -mcf-branch-label-scheme=func-sig -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv32 -fcf-protection=branch -mcf-branch-label-scheme=unlabeled -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-UNLABELED
+// RUN: %clang --target=riscv64 -fcf-protection=full -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv64 -fcf-protection=full -mcf-branch-label-scheme=func-sig -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv64 -fcf-protection=full -mcf-branch-label-scheme=unlabeled -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-UNLABELED
+// RUN: %clang --target=riscv64 -fcf-protection=branch -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv64 -fcf-protection=branch -mcf-branch-label-scheme=func-sig -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-FUNC-SIG
+// RUN: %clang --target=riscv64 -fcf-protection=branch -mcf-branch-label-scheme=unlabeled -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=ZICFILP-UNLABELED
+// ZICFILP-FUNC-SIG-NOT: "-target-feature" "-zicfilp-unlabeled"
+// ZICFILP-FUNC-SIG: "-target-feature" "+zicfilp-func-sig"
+// ZICFILP-FUNC-SIG-NOT: "-target-feature" "-zicfilp-unlabeled"
+// ZICFILP-UNLABELED-NOT: "-target-feature" "-zicfilp-func-sig"
+// ZICFILP-UNLABELED: "-target-feature" "+zicfilp-unlabeled"
+// ZICFILP-UNLABELED-NOT: "-target-feature" "-zicfilp-func-sig"
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
index 5abb5461f74b3..065ff48a38ab4 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
@@ -134,6 +134,10 @@ void validate(const Triple &TT, const FeatureBitset &FeatureBits) {
if (FeatureBits[RISCV::Feature32Bit] &&
FeatureBits[RISCV::Feature64Bit])
reportFatalUsageError("RV32 and RV64 can't be combined");
+ if (FeatureBits[RISCV::FeatureZicfilpFuncSig] &&
+ FeatureBits[RISCV::FeatureZicfilpUnlabeled])
+ reportFatalUsageError(
+ "+zicfilp-func-sig and +zicfilp-unlabeled can't be combined");
}
llvm::Expected<std::unique_ptr<RISCVISAInfo>>
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
index 78f47794a5b66..5d89128573d73 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
@@ -337,9 +337,7 @@ bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
// Static chain parameter must not be passed in normal argument registers,
// so we assign t2/t3 for it as done in GCC's
// __builtin_call_with_static_chain
- bool HasCFBranch =
- Subtarget.hasStdExtZicfilp() &&
- MF.getFunction().getParent()->getModuleFlag("cf-protection-branch");
+ bool HasCFBranch = Subtarget.hasZicfilpCFI();
// Normal: t2, Branch control flow protection: t3
const auto StaticChainReg = HasCFBranch ? RISCV::X28 : RISCV::X7;
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 0c75312847c87..3951b72aa8de5 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -176,8 +176,6 @@ def FeatureStdExtZicfilp
def HasStdExtZicfilp : Predicate<"Subtarget->hasStdExtZicfilp()">,
AssemblerPredicate<(all_of FeatureStdExtZicfilp),
"'Zicfilp' (Landing pad)">;
-def NoStdExtZicfilp : Predicate<"!Subtarget->hasStdExtZicfilp()">,
- AssemblerPredicate<(all_of (not FeatureStdExtZicfilp))>;
def FeatureStdExtZicfiss
: RISCVExperimentalExtension<1, 0, "Shadow stack",
@@ -1976,3 +1974,14 @@ def TuneSiFive7 : SubtargetFeature<"sifive7", "RISCVProcFamily", "SiFive7",
def TuneVentanaVeyron : SubtargetFeature<"ventana-veyron", "RISCVProcFamily", "VentanaVeyron",
"Ventana Veyron-Series processors">;
+// Zicfilp-based CFI
+def FeatureZicfilpUnlabeled
+ : SubtargetFeature<
+ "zicfilp-unlabeled", "RISCVZicfilpCFIScheme", "ZicfilpUnlabeled",
+ "Enforce forward-edge control-flow integrity with ZICFILP-unlabeled">;
+def FeatureZicfilpFuncSig
+ : SubtargetFeature<
+ "zicfilp-func-sig", "RISCVZicfilpCFIScheme", "ZicfilpFuncSig",
+ "Enforce forward-edge control-flow integrity with ZICFILP-func-sig">;
+def HasZicfilpCFI : Predicate<"Subtarget->hasZicfilpCFI()">;
+def NoZicfilpCFI : Predicate<"!Subtarget->hasZicfilpCFI()">;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3f58e39665c02..affa52ab71b2e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8969,10 +8969,7 @@ SDValue RISCVTargetLowering::lowerINIT_TRAMPOLINE(SDValue Op,
// 28: <FunctionAddressOffset>
// 36:
- const bool HasCFBranch =
- Subtarget.hasStdExtZicfilp() &&
- DAG.getMachineFunction().getFunction().getParent()->getModuleFlag(
- "cf-protection-branch");
+ const bool HasCFBranch = Subtarget.hasZicfilpCFI();
const unsigned StaticChainIdx = HasCFBranch ? 5 : 4;
const unsigned StaticChainOffset = StaticChainIdx * 4;
const unsigned FunctionAddressOffset = StaticChainOffset + 8;
@@ -24082,11 +24079,10 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Use software guarded branch for large code model non-indirect calls
// Tail call to external symbol will have a null CLI.CB and we need another
// way to determine the callsite type
- bool NeedSWGuarded = false;
- if (getTargetMachine().getCodeModel() == CodeModel::Large &&
- Subtarget.hasStdExtZicfilp() &&
- ((CLI.CB && !CLI.CB->isIndirectCall()) || CalleeIsLargeExternalSymbol))
- NeedSWGuarded = true;
+ const bool NeedSWGuarded =
+ getTargetMachine().getCodeModel() == CodeModel::Large &&
+ Subtarget.hasZicfilpCFI() &&
+ ((CLI.CB && !CLI.CB->isIndirectCall()) || CalleeIsLargeExternalSymbol);
if (IsTailCall) {
MF.getFrameInfo().setHasTailCall();
@@ -25722,8 +25718,8 @@ SDValue RISCVTargetLowering::expandIndirectJTBranch(const SDLoc &dl,
SDValue Value, SDValue Addr,
int JTI,
SelectionDAG &DAG) const {
- if (Subtarget.hasStdExtZicfilp()) {
- // When Zicfilp enabled, we need to use software guarded branch for jump
+ if (Subtarget.hasZicfilpCFI()) {
+ // When Zicfilp CFI is used, we need to use software guarded branch for jump
// table branch.
SDValue Chain = Value;
// Jump table debug info is only needed if CodeView is enabled.
diff --git a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
index 0fc139a30ae76..7f2dc2e29b6d4 100644
--- a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
+++ b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/Support/Error.h"
#define DEBUG_TYPE "riscv-indirect-branch-tracking"
#define PASS_NAME "RISC-V Indirect Branch Tracking"
@@ -76,10 +77,15 @@ static bool isCallReturnTwice(const MachineOperand &MOp) {
bool RISCVIndirectBranchTracking::runOnMachineFunction(MachineFunction &MF) {
const auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
- const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
- if (!Subtarget.hasStdExtZicfilp())
+ if (!Subtarget.hasZicfilpCFI())
return false;
+ const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
+
+ if (Subtarget.getZicfilpCFIScheme() != RISCVSubtarget::ZicfilpUnlabeled)
+ reportFatalUsageError(
+ "only cf-branch-label-scheme=unlabeled is supported for now");
+
uint32_t FixedLabel = 0;
if (PreferredLandingPadLabel.getNumOccurrences() > 0) {
if (!isUInt<20>(PreferredLandingPadLabel))
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 9a4eb12ca0eb0..1d1cfdfb8c270 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1758,12 +1758,12 @@ let isBarrier = 1, isBranch = 1, isTerminator = 1 in
def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>;
-let Predicates = [NoStdExtZicfilp],
+let Predicates = [NoZicfilpCFI],
isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
def PseudoBRIND : Pseudo<(outs), (ins GPRJALR:$rs1, simm12_lo:$imm12), []>,
PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12_lo:$imm12)>;
-let Predicates = [HasStdExtZicfilp],
+let Predicates = [HasZicfilpCFI],
isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in {
def PseudoBRINDNonX7 : Pseudo<(outs), (ins GPRJALRNonX7:$rs1, simm12_lo:$imm12), []>,
PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12_lo:$imm12)>;
@@ -1773,7 +1773,7 @@ def PseudoBRINDX7 : Pseudo<(outs), (ins GPRX7:$rs1, simm12_lo:$imm12), []>,
// For Zicfilp, need to avoid using X7/T2 for indirect branches which need
// landing pad.
-let Predicates = [HasStdExtZicfilp] in {
+let Predicates = [HasZicfilpCFI] in {
def : Pat<(brind GPRJALRNonX7:$rs1), (PseudoBRINDNonX7 GPRJALRNonX7:$rs1, 0)>;
def : Pat<(brind (add GPRJALRNonX7:$rs1, simm12_lo:$imm12)),
(PseudoBRINDNonX7 GPRJALRNonX7:$rs1, simm12_lo:$imm12)>;
@@ -1783,7 +1783,7 @@ def : Pat<(riscv_sw_guarded_brind (add GPRX7:$rs1, simm12_lo:$imm12)),
(PseudoBRINDX7 GPRX7:$rs1, simm12_lo:$imm12)>;
}
-let Predicates = [NoStdExtZicfilp] in {
+let Predicates = [NoZicfilpCFI] in {
def : Pat<(brind GPRJALR:$rs1), (PseudoBRIND GPRJALR:$rs1, 0)>;
def : Pat<(brind (add GPRJALR:$rs1, simm12_lo:$imm12)),
(PseudoBRIND GPRJALR:$rs1, simm12_lo:$imm12)>;
@@ -1820,11 +1820,11 @@ let Predicates = [HasStdExtSmrnmi] in
def : Pat<(riscv_mnret_glue), (MNRET)>;
let isCall = 1, Defs = [X1] in {
-let Predicates = [NoStdExtZicfilp] in
+let Predicates = [NoZicfilpCFI] in
def PseudoCALLIndirect : Pseudo<(outs), (ins GPRJALR:$rs1),
[(riscv_call GPRJALR:$rs1)]>,
PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
-let Predicates = [HasStdExtZicfilp] in {
+let Predicates = [HasZicfilpCFI] in {
def PseudoCALLIndirectNonX7 : Pseudo<(outs), (ins GPRJALRNonX7:$rs1),
[(riscv_call GPRJALRNonX7:$rs1)]>,
PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
@@ -1849,11 +1849,11 @@ def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), [],
Sched<[WriteIALU, WriteJalr, ReadJalr]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in {
-let Predicates = [NoStdExtZicfilp] in
+let Predicates = [NoZicfilpCFI] in
def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1),
[(riscv_tail GPRTC:$rs1)]>,
PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;
-let Predicates = [HasStdExtZicfilp] in {
+let Predicates = [HasZicfilpCFI] in {
def PseudoTAILIndirectNonX7 : Pseudo<(outs), (ins GPRTCNonX7:$rs1),
[(riscv_tail GPRTCNonX7:$rs1)]>,
PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;
diff --git a/llvm/lib/Target/RISCV/RISCVLandingPadSetup.cpp b/llvm/lib/Target/RISCV/RISCVLandingPadSetup.cpp
index 072694e123084..9b5cd1f5d0b80 100644
--- a/llvm/lib/Target/RISCV/RISCVLandingPadSetup.cpp
+++ b/llvm/lib/Target/RISCV/RISCVLandingPadSetup.cpp
@@ -48,7 +48,7 @@ bool RISCVLandingPadSetup::runOnMachineFunction(MachineFunction &MF) {
const auto &STI = MF.getSubtarget<RISCVSubtarget>();
const RISCVInstrInfo &TII = *STI.getInstrInfo();
- if (!STI.hasStdExtZicfilp())
+ if (!STI.hasZicfilpCFI())
return false;
uint32_t Label = 0;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index c16b23e290df1..0bc8f2136084d 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -90,12 +90,19 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
Quadratic,
NLog2N,
};
+ enum RISCVZicfilpCFISchemeEnum : uint8_t {
+ ZicfilpDisabled,
+ ZicfilpUnlabeled,
+ ZicfilpFuncSig,
+ };
// clang-format on
+
private:
virtual void anchor();
RISCVProcFamilyEnum RISCVProcFamily = Others;
RISCVVRGatherCostModelEnum RISCVVRGatherCostModel = Quadratic;
+ RISCVZicfilpCFISchemeEnum RISCVZicfilpCFIScheme = ZicfilpDisabled;
#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
bool ATTRIBUTE = DEFAULT;
@@ -205,6 +212,13 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
return HasStdExtZicond || HasVendorXVentanaCondOps;
}
+ RISCVZicfilpCFISchemeEnum getZicfilpCFIScheme() const {
+ return RISCVZicfilpCFIScheme;
+ }
+ bool hasZicfilpCFI() const {
+ return getZicfilpCFIScheme() != ZicfilpDisabled;
+ }
+
bool hasConditionalMoveFusion() const {
// Do we support fusing a branch+mv or branch+c.mv as a conditional move.
return (hasConditionalCompressedMoveFusion() && hasStdExtZca()) ||
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 52dc38564059c..259cb685a9dc9 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -17,6 +17,7 @@
#include "RISCVTargetObjectFile.h"
#include "RISCVTargetTransformInfo.h"
#include "TargetInfo/RISCVTargetInfo.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
@@ -31,11 +32,15 @@
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h"
@@ -178,6 +183,33 @@ RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
setCFIFixup(!EnableCFIInstrInserter);
}
+static void reportBadZicfilpUsage(const FeatureBitset &Features,
+ const Module &M) {
+ if (const Metadata *const CF = M.getModuleFlag("cf-protection-branch");
+ CF && !mdconst::extract<ConstantInt>(CF)->isZero()) {
+ StringRef LabelScheme;
+ if (const Metadata *const MD = M.getModuleFlag("cf-branch-label-scheme")) {
+ LabelScheme = cast<MDString>(MD)->getString();
+ if (LabelScheme != "func-sig" && LabelScheme != "unlabeled")
+ reportFatalUsageError("cf-branch-label-scheme=" + LabelScheme +
+ " module flag is unsupported");
+ } else {
+ reportFatalUsageError("missing cf-branch-label-scheme module flag");
+ }
+
+ if ((!Features[RISCV::FeatureZicfilpFuncSig] &&
+ LabelScheme == "func-sig") ||
+ (!Features[RISCV::FeatureZicfilpUnlabeled] &&
+ LabelScheme == "unlabeled"))
+ reportFatalUsageError(
+ "require target feature (+zicfilp-" + LabelScheme +
+ ") to handle cf-branch-label-scheme=" + LabelScheme + " module flag");
+ } else if (Features[RISCV::FeatureZicfilpFuncSig] ||
+ Features[RISCV::FeatureZicfilpUnlabeled]) {
+ reportFatalUsageError("require cf-protection-branch != 0 module flag");
+ }
+}
+
const RISCVSubtarget *
RISCVTargetMachine::getSubtargetImpl(const Function &F) const {
Attribute CPUAttr = F.getFnAttribute("target-cpu");
@@ -252,6 +284,9 @@ RISCVTargetMachine::getSubtargetImpl(const Function &F) const {
I = std::make_unique<RISCVSubtarget>(
TargetTriple, CPU, TuneCPU, FS, ABIName, RVVBitsMin, RVVBitsMax, *this);
}
+
+ if (const Module *const M = F.getParent())
+ reportBadZicfilpUsage(I->getFeatureBits(), *M);
return I.get();
}
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
index e987923233865..40ece5eb81884 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll
@@ -7,8 +7,6 @@
; RUN: | FileCheck %s
; RUN: llc -mtriple=riscv32 -relocation-model=pic -verify-machineinstrs < %s \
; RUN: | FileCheck %s
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+experimental-zicfilp < %s \
-; RUN: | FileCheck %s --check-prefixes=CHECK-ZICFILP
define void @relax_bcc(i1 %a) nounwind {
; CHECK-LABEL: relax_bcc:
@@ -22,19 +20,6 @@ define void @relax_bcc(i1 %a) nounwind {
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: .LBB0_2: # %tail
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_bcc:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: andi a0, a0, 1
-; CHECK-ZICFILP-NEXT: bnez a0, .LBB0_1
-; CHECK-ZICFILP-NEXT: j .LBB0_2
-; CHECK-ZICFILP-NEXT: .LBB0_1: # %iftrue
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 4096
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: .LBB0_2: # %tail
-; CHECK-ZICFILP-NEXT: ret
br i1 %a, label %iftrue, label %tail
iftrue:
@@ -67,29 +52,6 @@ define i32 @relax_jal(i1 %a) nounwind {
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_jal:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -16
-; CHECK-ZICFILP-NEXT: andi a0, a0, 1
-; CHECK-ZICFILP-NEXT: bnez a0, .LBB1_1
-; CHECK-ZICFILP-NEXT: # %bb.4:
-; CHECK-ZICFILP-NEXT: jump .LBB1_2, t2
-; CHECK-ZICFILP-NEXT: .LBB1_1: # %iftrue
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB1_3
-; CHECK-ZICFILP-NEXT: .LBB1_2: # %jmp
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: .LBB1_3: # %tail
-; CHECK-ZICFILP-NEXT: li a0, 1
-; CHECK-ZICFILP-NEXT: addi sp, sp, 16
-; CHECK-ZICFILP-NEXT: ret
br i1 %a, label %iftrue, label %jmp
jmp:
@@ -348,247 +310,6 @@ define void @relax_jal_spill_32() {
; CHECK-NEXT: addi sp, sp, 64
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_jal_spill_32:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -64
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 64
-; CHECK-ZICFILP-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s9, 20(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s10, 16(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s11, 12(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: .cfi_offset ra, -4
-; CHECK-ZICFILP-NEXT: .cfi_offset s0, -8
-; CHECK-ZICFILP-NEXT: .cfi_offset s1, -12
-; CHECK-ZICFILP-NEXT: .cfi_offset s2, -16
-; CHECK-ZICFILP-NEXT: .cfi_offset s3, -20
-; CHECK-ZICFILP-NEXT: .cfi_offset s4, -24
-; CHECK-ZICFILP-NEXT: .cfi_offset s5, -28
-; CHECK-ZICFILP-NEXT: .cfi_offset s6, -32
-; CHECK-ZICFILP-NEXT: .cfi_offset s7, -36
-; CHECK-ZICFILP-NEXT: .cfi_offset s8, -40
-; CHECK-ZICFILP-NEXT: .cfi_offset s9, -44
-; CHECK-ZICFILP-NEXT: .cfi_offset s10, -48
-; CHECK-ZICFILP-NEXT: .cfi_offset s11, -52
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li ra, 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t0, 5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t1, 6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t2, 7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s0, 8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s1, 9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a0, 10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a1, 11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a2, 12
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a3, 13
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a4, 14
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a5, 15
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a6, 16
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a7, 17
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s2, 18
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s3, 19
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s4, 20
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s5, 21
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s6, 22
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s7, 23
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s8, 24
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s9, 25
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s10, 26
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s11, 27
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t3, 28
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t4, 29
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t5, 30
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t6, 31
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: beq t5, t6, .LBB2_1
-; CHECK-ZICFILP-NEXT: # %bb.3:
-; CHECK-ZICFILP-NEXT: sw t2, 0(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: jump .LBB2_4, t2
-; CHECK-ZICFILP-NEXT: .LBB2_1: # %branch_1
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB2_2
-; CHECK-ZICFILP-NEXT: .LBB2_4: # %branch_2
-; CHECK-ZICFILP-NEXT: lw t2, 0(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .LBB2_2: # %branch_2
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use ra
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s9, 20(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s10, 16(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s11, 12(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .cfi_restore ra
-; CHECK-ZICFILP-NEXT: .cfi_restore s0
-; CHECK-ZICFILP-NEXT: .cfi_restore s1
-; CHECK-ZICFILP-NEXT: .cfi_restore s2
-; CHECK-ZICFILP-NEXT: .cfi_restore s3
-; CHECK-ZICFILP-NEXT: .cfi_restore s4
-; CHECK-ZICFILP-NEXT: .cfi_restore s5
-; CHECK-ZICFILP-NEXT: .cfi_restore s6
-; CHECK-ZICFILP-NEXT: .cfi_restore s7
-; CHECK-ZICFILP-NEXT: .cfi_restore s8
-; CHECK-ZICFILP-NEXT: .cfi_restore s9
-; CHECK-ZICFILP-NEXT: .cfi_restore s10
-; CHECK-ZICFILP-NEXT: .cfi_restore s11
-; CHECK-ZICFILP-NEXT: addi sp, sp, 64
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 0
-; CHECK-ZICFILP-NEXT: ret
%ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
%t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
%t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
@@ -910,256 +631,6 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
; CHECK-NEXT: addi sp, sp, 2032
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_jal_spill_32_adjust_spill_slot:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 2032
-; CHECK-ZICFILP-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s0, 2024(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s1, 2020(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s2, 2016(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s3, 2012(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s4, 2008(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s5, 2004(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s6, 2000(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s7, 1996(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s8, 1992(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s9, 1988(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s10, 1984(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s11, 1980(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: .cfi_offset ra, -4
-; CHECK-ZICFILP-NEXT: .cfi_offset s0, -8
-; CHECK-ZICFILP-NEXT: .cfi_offset s1, -12
-; CHECK-ZICFILP-NEXT: .cfi_offset s2, -16
-; CHECK-ZICFILP-NEXT: .cfi_offset s3, -20
-; CHECK-ZICFILP-NEXT: .cfi_offset s4, -24
-; CHECK-ZICFILP-NEXT: .cfi_offset s5, -28
-; CHECK-ZICFILP-NEXT: .cfi_offset s6, -32
-; CHECK-ZICFILP-NEXT: .cfi_offset s7, -36
-; CHECK-ZICFILP-NEXT: .cfi_offset s8, -40
-; CHECK-ZICFILP-NEXT: .cfi_offset s9, -44
-; CHECK-ZICFILP-NEXT: .cfi_offset s10, -48
-; CHECK-ZICFILP-NEXT: .cfi_offset s11, -52
-; CHECK-ZICFILP-NEXT: addi s0, sp, 2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa s0, 0
-; CHECK-ZICFILP-NEXT: lui a0, 2
-; CHECK-ZICFILP-NEXT: addi a0, a0, -2032
-; CHECK-ZICFILP-NEXT: sub sp, sp, a0
-; CHECK-ZICFILP-NEXT: srli a0, sp, 12
-; CHECK-ZICFILP-NEXT: slli sp, a0, 12
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li ra, 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t0, 5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t1, 6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t2, 7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s0, 8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s1, 9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a0, 10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a1, 11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a2, 12
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a3, 13
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a4, 14
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a5, 15
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a6, 16
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a7, 17
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s2, 18
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s3, 19
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s4, 20
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s5, 21
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s6, 22
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s7, 23
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s8, 24
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s9, 25
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s10, 26
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s11, 27
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t3, 28
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t4, 29
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t5, 30
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t6, 31
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: beq t5, t6, .LBB3_1
-; CHECK-ZICFILP-NEXT: # %bb.3:
-; CHECK-ZICFILP-NEXT: sw t2, 0(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: jump .LBB3_4, t2
-; CHECK-ZICFILP-NEXT: .LBB3_1: # %branch_1
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB3_2
-; CHECK-ZICFILP-NEXT: .LBB3_4: # %branch_2
-; CHECK-ZICFILP-NEXT: lw t2, 0(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .LBB3_2: # %branch_2
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use ra
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: addi sp, s0, -2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa sp, 2032
-; CHECK-ZICFILP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s2, 2016(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s3, 2012(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s4, 2008(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s5, 2004(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s6, 2000(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s7, 1996(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s8, 1992(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s9, 1988(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s10, 1984(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s11, 1980(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .cfi_restore ra
-; CHECK-ZICFILP-NEXT: .cfi_restore s0
-; CHECK-ZICFILP-NEXT: .cfi_restore s1
-; CHECK-ZICFILP-NEXT: .cfi_restore s2
-; CHECK-ZICFILP-NEXT: .cfi_restore s3
-; CHECK-ZICFILP-NEXT: .cfi_restore s4
-; CHECK-ZICFILP-NEXT: .cfi_restore s5
-; CHECK-ZICFILP-NEXT: .cfi_restore s6
-; CHECK-ZICFILP-NEXT: .cfi_restore s7
-; CHECK-ZICFILP-NEXT: .cfi_restore s8
-; CHECK-ZICFILP-NEXT: .cfi_restore s9
-; CHECK-ZICFILP-NEXT: .cfi_restore s10
-; CHECK-ZICFILP-NEXT: .cfi_restore s11
-; CHECK-ZICFILP-NEXT: addi sp, sp, 2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 0
-; CHECK-ZICFILP-NEXT: ret
%stack_obj = alloca i32, align 4096
%ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
@@ -1489,265 +960,6 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
; CHECK-NEXT: # %bb.7: # %space
; CHECK-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
; CHECK-NEXT: jump .LBB4_8, s11
-;
-; CHECK-ZICFILP-LABEL: relax_jal_spill_32_restore_block_correspondence:
-; CHECK-ZICFILP: # %bb.0: # %entry
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -64
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 64
-; CHECK-ZICFILP-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s9, 20(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s10, 16(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sw s11, 12(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: .cfi_offset ra, -4
-; CHECK-ZICFILP-NEXT: .cfi_offset s0, -8
-; CHECK-ZICFILP-NEXT: .cfi_offset s1, -12
-; CHECK-ZICFILP-NEXT: .cfi_offset s2, -16
-; CHECK-ZICFILP-NEXT: .cfi_offset s3, -20
-; CHECK-ZICFILP-NEXT: .cfi_offset s4, -24
-; CHECK-ZICFILP-NEXT: .cfi_offset s5, -28
-; CHECK-ZICFILP-NEXT: .cfi_offset s6, -32
-; CHECK-ZICFILP-NEXT: .cfi_offset s7, -36
-; CHECK-ZICFILP-NEXT: .cfi_offset s8, -40
-; CHECK-ZICFILP-NEXT: .cfi_offset s9, -44
-; CHECK-ZICFILP-NEXT: .cfi_offset s10, -48
-; CHECK-ZICFILP-NEXT: .cfi_offset s11, -52
-; CHECK-ZICFILP-NEXT: .cfi_remember_state
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li ra, 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t0, 5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t1, 6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t2, 7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s0, 8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s1, 9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a0, 10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a1, 11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a2, 12
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a3, 13
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a4, 14
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a5, 15
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a6, 16
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a7, 17
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s2, 18
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s3, 19
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s4, 20
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s5, 21
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s6, 22
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s7, 23
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s8, 24
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s9, 25
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s10, 26
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s11, 27
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t3, 28
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t4, 29
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t5, 30
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t6, 31
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: bne t5, t6, .LBB4_2
-; CHECK-ZICFILP-NEXT: j .LBB4_1
-; CHECK-ZICFILP-NEXT: .LBB4_8: # %dest_1
-; CHECK-ZICFILP-NEXT: lw t2, 0(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .LBB4_1: # %dest_1
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # dest 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB4_3
-; CHECK-ZICFILP-NEXT: .LBB4_2: # %cond_2
-; CHECK-ZICFILP-NEXT: bne t3, t4, .LBB4_5
-; CHECK-ZICFILP-NEXT: .LBB4_3: # %dest_2
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # dest 2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: .LBB4_4: # %dest_3
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # dest 3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use ra
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s9, 20(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s10, 16(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: lw s11, 12(sp) # 4-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .cfi_restore ra
-; CHECK-ZICFILP-NEXT: .cfi_restore s0
-; CHECK-ZICFILP-NEXT: .cfi_restore s1
-; CHECK-ZICFILP-NEXT: .cfi_restore s2
-; CHECK-ZICFILP-NEXT: .cfi_restore s3
-; CHECK-ZICFILP-NEXT: .cfi_restore s4
-; CHECK-ZICFILP-NEXT: .cfi_restore s5
-; CHECK-ZICFILP-NEXT: .cfi_restore s6
-; CHECK-ZICFILP-NEXT: .cfi_restore s7
-; CHECK-ZICFILP-NEXT: .cfi_restore s8
-; CHECK-ZICFILP-NEXT: .cfi_restore s9
-; CHECK-ZICFILP-NEXT: .cfi_restore s10
-; CHECK-ZICFILP-NEXT: .cfi_restore s11
-; CHECK-ZICFILP-NEXT: addi sp, sp, 64
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 0
-; CHECK-ZICFILP-NEXT: ret
-; CHECK-ZICFILP-NEXT: .LBB4_5: # %cond_3
-; CHECK-ZICFILP-NEXT: .cfi_restore_state
-; CHECK-ZICFILP-NEXT: beq t1, t2, .LBB4_4
-; CHECK-ZICFILP-NEXT: # %bb.6: # %space
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: # %bb.7: # %space
-; CHECK-ZICFILP-NEXT: sw t2, 0(sp) # 4-byte Folded Spill
-; CHECK-ZICFILP-NEXT: jump .LBB4_8, t2
entry:
%ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
%t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
index c54ed1b06b1c8..f3cd9c317ec49 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll
@@ -7,8 +7,6 @@
; RUN: | FileCheck %s
; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs < %s \
; RUN: | FileCheck %s
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+experimental-zicfilp < %s \
-; RUN: | FileCheck %s --check-prefixes=CHECK-ZICFILP
define void @relax_bcc(i1 %a) nounwind {
; CHECK-LABEL: relax_bcc:
@@ -22,19 +20,6 @@ define void @relax_bcc(i1 %a) nounwind {
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: .LBB0_2: # %tail
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_bcc:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: andi a0, a0, 1
-; CHECK-ZICFILP-NEXT: bnez a0, .LBB0_1
-; CHECK-ZICFILP-NEXT: j .LBB0_2
-; CHECK-ZICFILP-NEXT: .LBB0_1: # %iftrue
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 4096
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: .LBB0_2: # %tail
-; CHECK-ZICFILP-NEXT: ret
br i1 %a, label %iftrue, label %tail
iftrue:
@@ -67,29 +52,6 @@ define i32 @relax_jal(i1 %a) nounwind {
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_jal:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -16
-; CHECK-ZICFILP-NEXT: andi a0, a0, 1
-; CHECK-ZICFILP-NEXT: bnez a0, .LBB1_1
-; CHECK-ZICFILP-NEXT: # %bb.4:
-; CHECK-ZICFILP-NEXT: jump .LBB1_2, t2
-; CHECK-ZICFILP-NEXT: .LBB1_1: # %iftrue
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB1_3
-; CHECK-ZICFILP-NEXT: .LBB1_2: # %jmp
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: .LBB1_3: # %tail
-; CHECK-ZICFILP-NEXT: li a0, 1
-; CHECK-ZICFILP-NEXT: addi sp, sp, 16
-; CHECK-ZICFILP-NEXT: ret
br i1 %a, label %iftrue, label %jmp
jmp:
@@ -110,7 +72,6 @@ tail:
define void @relax_jal_spill_64() {
-;
; CHECK-LABEL: relax_jal_spill_64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -112
@@ -350,247 +311,6 @@ define void @relax_jal_spill_64() {
; CHECK-NEXT: addi sp, sp, 112
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_jal_spill_64:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -112
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 112
-; CHECK-ZICFILP-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s0, 96(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s1, 88(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s2, 80(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s3, 72(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s4, 64(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s5, 56(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s6, 48(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s7, 40(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s8, 32(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s9, 24(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s10, 16(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s11, 8(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: .cfi_offset ra, -8
-; CHECK-ZICFILP-NEXT: .cfi_offset s0, -16
-; CHECK-ZICFILP-NEXT: .cfi_offset s1, -24
-; CHECK-ZICFILP-NEXT: .cfi_offset s2, -32
-; CHECK-ZICFILP-NEXT: .cfi_offset s3, -40
-; CHECK-ZICFILP-NEXT: .cfi_offset s4, -48
-; CHECK-ZICFILP-NEXT: .cfi_offset s5, -56
-; CHECK-ZICFILP-NEXT: .cfi_offset s6, -64
-; CHECK-ZICFILP-NEXT: .cfi_offset s7, -72
-; CHECK-ZICFILP-NEXT: .cfi_offset s8, -80
-; CHECK-ZICFILP-NEXT: .cfi_offset s9, -88
-; CHECK-ZICFILP-NEXT: .cfi_offset s10, -96
-; CHECK-ZICFILP-NEXT: .cfi_offset s11, -104
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li ra, 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t0, 5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t1, 6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t2, 7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s0, 8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s1, 9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a0, 10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a1, 11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a2, 12
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a3, 13
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a4, 14
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a5, 15
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a6, 16
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a7, 17
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s2, 18
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s3, 19
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s4, 20
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s5, 21
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s6, 22
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s7, 23
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s8, 24
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s9, 25
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s10, 26
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s11, 27
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t3, 28
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t4, 29
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t5, 30
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t6, 31
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: beq t5, t6, .LBB2_1
-; CHECK-ZICFILP-NEXT: # %bb.3:
-; CHECK-ZICFILP-NEXT: sd t2, 0(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: jump .LBB2_4, t2
-; CHECK-ZICFILP-NEXT: .LBB2_1: # %branch_1
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB2_2
-; CHECK-ZICFILP-NEXT: .LBB2_4: # %branch_2
-; CHECK-ZICFILP-NEXT: ld t2, 0(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .LBB2_2: # %branch_2
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use ra
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s0, 96(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s1, 88(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s2, 80(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s3, 72(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s4, 64(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s5, 56(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s6, 48(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s7, 40(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s8, 32(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s9, 24(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s10, 16(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s11, 8(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .cfi_restore ra
-; CHECK-ZICFILP-NEXT: .cfi_restore s0
-; CHECK-ZICFILP-NEXT: .cfi_restore s1
-; CHECK-ZICFILP-NEXT: .cfi_restore s2
-; CHECK-ZICFILP-NEXT: .cfi_restore s3
-; CHECK-ZICFILP-NEXT: .cfi_restore s4
-; CHECK-ZICFILP-NEXT: .cfi_restore s5
-; CHECK-ZICFILP-NEXT: .cfi_restore s6
-; CHECK-ZICFILP-NEXT: .cfi_restore s7
-; CHECK-ZICFILP-NEXT: .cfi_restore s8
-; CHECK-ZICFILP-NEXT: .cfi_restore s9
-; CHECK-ZICFILP-NEXT: .cfi_restore s10
-; CHECK-ZICFILP-NEXT: .cfi_restore s11
-; CHECK-ZICFILP-NEXT: addi sp, sp, 112
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 0
-; CHECK-ZICFILP-NEXT: ret
%ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
%t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
%t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
@@ -661,7 +381,6 @@ branch_2:
}
define void @relax_jal_spill_64_adjust_spill_slot() {
-;
; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
; is out the range of 12-bit signed integer, check whether the spill slot is
; adjusted to close to the stack base register.
@@ -913,256 +632,6 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
; CHECK-NEXT: addi sp, sp, 2032
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
-;
-; CHECK-ZICFILP-LABEL: relax_jal_spill_64_adjust_spill_slot:
-; CHECK-ZICFILP: # %bb.0:
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 2032
-; CHECK-ZICFILP-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s1, 2008(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s2, 2000(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s3, 1992(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s4, 1984(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s5, 1976(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s6, 1968(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s7, 1960(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s8, 1952(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s9, 1944(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s10, 1936(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s11, 1928(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: .cfi_offset ra, -8
-; CHECK-ZICFILP-NEXT: .cfi_offset s0, -16
-; CHECK-ZICFILP-NEXT: .cfi_offset s1, -24
-; CHECK-ZICFILP-NEXT: .cfi_offset s2, -32
-; CHECK-ZICFILP-NEXT: .cfi_offset s3, -40
-; CHECK-ZICFILP-NEXT: .cfi_offset s4, -48
-; CHECK-ZICFILP-NEXT: .cfi_offset s5, -56
-; CHECK-ZICFILP-NEXT: .cfi_offset s6, -64
-; CHECK-ZICFILP-NEXT: .cfi_offset s7, -72
-; CHECK-ZICFILP-NEXT: .cfi_offset s8, -80
-; CHECK-ZICFILP-NEXT: .cfi_offset s9, -88
-; CHECK-ZICFILP-NEXT: .cfi_offset s10, -96
-; CHECK-ZICFILP-NEXT: .cfi_offset s11, -104
-; CHECK-ZICFILP-NEXT: addi s0, sp, 2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa s0, 0
-; CHECK-ZICFILP-NEXT: lui a0, 2
-; CHECK-ZICFILP-NEXT: addi a0, a0, -2032
-; CHECK-ZICFILP-NEXT: sub sp, sp, a0
-; CHECK-ZICFILP-NEXT: srli a0, sp, 12
-; CHECK-ZICFILP-NEXT: slli sp, a0, 12
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li ra, 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t0, 5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t1, 6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t2, 7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s0, 8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s1, 9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a0, 10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a1, 11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a2, 12
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a3, 13
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a4, 14
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a5, 15
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a6, 16
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a7, 17
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s2, 18
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s3, 19
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s4, 20
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s5, 21
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s6, 22
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s7, 23
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s8, 24
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s9, 25
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s10, 26
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s11, 27
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t3, 28
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t4, 29
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t5, 30
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t6, 31
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: beq t5, t6, .LBB3_1
-; CHECK-ZICFILP-NEXT: # %bb.3:
-; CHECK-ZICFILP-NEXT: sd t2, 0(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: jump .LBB3_4, t2
-; CHECK-ZICFILP-NEXT: .LBB3_1: # %branch_1
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB3_2
-; CHECK-ZICFILP-NEXT: .LBB3_4: # %branch_2
-; CHECK-ZICFILP-NEXT: ld t2, 0(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .LBB3_2: # %branch_2
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use ra
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: addi sp, s0, -2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa sp, 2032
-; CHECK-ZICFILP-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s1, 2008(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s2, 2000(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s3, 1992(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s4, 1984(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s5, 1976(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s6, 1968(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s7, 1960(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s8, 1952(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s9, 1944(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s10, 1936(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s11, 1928(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .cfi_restore ra
-; CHECK-ZICFILP-NEXT: .cfi_restore s0
-; CHECK-ZICFILP-NEXT: .cfi_restore s1
-; CHECK-ZICFILP-NEXT: .cfi_restore s2
-; CHECK-ZICFILP-NEXT: .cfi_restore s3
-; CHECK-ZICFILP-NEXT: .cfi_restore s4
-; CHECK-ZICFILP-NEXT: .cfi_restore s5
-; CHECK-ZICFILP-NEXT: .cfi_restore s6
-; CHECK-ZICFILP-NEXT: .cfi_restore s7
-; CHECK-ZICFILP-NEXT: .cfi_restore s8
-; CHECK-ZICFILP-NEXT: .cfi_restore s9
-; CHECK-ZICFILP-NEXT: .cfi_restore s10
-; CHECK-ZICFILP-NEXT: .cfi_restore s11
-; CHECK-ZICFILP-NEXT: addi sp, sp, 2032
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 0
-; CHECK-ZICFILP-NEXT: ret
%stack_obj = alloca i64, align 4096
%ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
@@ -1235,7 +704,6 @@ branch_2:
}
define void @relax_jal_spill_64_restore_block_correspondence() {
-;
; CHECK-LABEL: relax_jal_spill_64_restore_block_correspondence:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -112
@@ -1493,265 +961,6 @@ define void @relax_jal_spill_64_restore_block_correspondence() {
; CHECK-NEXT: # %bb.7: # %space
; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
; CHECK-NEXT: jump .LBB4_8, s11
-;
-; CHECK-ZICFILP-LABEL: relax_jal_spill_64_restore_block_correspondence:
-; CHECK-ZICFILP: # %bb.0: # %entry
-; CHECK-ZICFILP-NEXT: lpad 0
-; CHECK-ZICFILP-NEXT: addi sp, sp, -112
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 112
-; CHECK-ZICFILP-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s0, 96(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s1, 88(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s2, 80(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s3, 72(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s4, 64(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s5, 56(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s6, 48(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s7, 40(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s8, 32(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s9, 24(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s10, 16(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: sd s11, 8(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: .cfi_offset ra, -8
-; CHECK-ZICFILP-NEXT: .cfi_offset s0, -16
-; CHECK-ZICFILP-NEXT: .cfi_offset s1, -24
-; CHECK-ZICFILP-NEXT: .cfi_offset s2, -32
-; CHECK-ZICFILP-NEXT: .cfi_offset s3, -40
-; CHECK-ZICFILP-NEXT: .cfi_offset s4, -48
-; CHECK-ZICFILP-NEXT: .cfi_offset s5, -56
-; CHECK-ZICFILP-NEXT: .cfi_offset s6, -64
-; CHECK-ZICFILP-NEXT: .cfi_offset s7, -72
-; CHECK-ZICFILP-NEXT: .cfi_offset s8, -80
-; CHECK-ZICFILP-NEXT: .cfi_offset s9, -88
-; CHECK-ZICFILP-NEXT: .cfi_offset s10, -96
-; CHECK-ZICFILP-NEXT: .cfi_offset s11, -104
-; CHECK-ZICFILP-NEXT: .cfi_remember_state
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li ra, 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t0, 5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t1, 6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t2, 7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s0, 8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s1, 9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a0, 10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a1, 11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a2, 12
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a3, 13
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a4, 14
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a5, 15
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a6, 16
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li a7, 17
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s2, 18
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s3, 19
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s4, 20
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s5, 21
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s6, 22
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s7, 23
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s8, 24
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s9, 25
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s10, 26
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li s11, 27
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t3, 28
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t4, 29
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t5, 30
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: li t6, 31
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: bne t5, t6, .LBB4_2
-; CHECK-ZICFILP-NEXT: j .LBB4_1
-; CHECK-ZICFILP-NEXT: .LBB4_8: # %dest_1
-; CHECK-ZICFILP-NEXT: ld t2, 0(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .LBB4_1: # %dest_1
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # dest 1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: j .LBB4_3
-; CHECK-ZICFILP-NEXT: .LBB4_2: # %cond_2
-; CHECK-ZICFILP-NEXT: bne t3, t4, .LBB4_5
-; CHECK-ZICFILP-NEXT: .LBB4_3: # %dest_2
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # dest 2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: .LBB4_4: # %dest_3
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # dest 3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use ra
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a0
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a1
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use a7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s2
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s7
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s8
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s9
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s10
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use s11
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t3
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t4
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t5
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: # reg use t6
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s0, 96(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s1, 88(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s2, 80(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s3, 72(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s4, 64(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s5, 56(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s6, 48(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s7, 40(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s8, 32(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s9, 24(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s10, 16(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: ld s11, 8(sp) # 8-byte Folded Reload
-; CHECK-ZICFILP-NEXT: .cfi_restore ra
-; CHECK-ZICFILP-NEXT: .cfi_restore s0
-; CHECK-ZICFILP-NEXT: .cfi_restore s1
-; CHECK-ZICFILP-NEXT: .cfi_restore s2
-; CHECK-ZICFILP-NEXT: .cfi_restore s3
-; CHECK-ZICFILP-NEXT: .cfi_restore s4
-; CHECK-ZICFILP-NEXT: .cfi_restore s5
-; CHECK-ZICFILP-NEXT: .cfi_restore s6
-; CHECK-ZICFILP-NEXT: .cfi_restore s7
-; CHECK-ZICFILP-NEXT: .cfi_restore s8
-; CHECK-ZICFILP-NEXT: .cfi_restore s9
-; CHECK-ZICFILP-NEXT: .cfi_restore s10
-; CHECK-ZICFILP-NEXT: .cfi_restore s11
-; CHECK-ZICFILP-NEXT: addi sp, sp, 112
-; CHECK-ZICFILP-NEXT: .cfi_def_cfa_offset 0
-; CHECK-ZICFILP-NEXT: ret
-; CHECK-ZICFILP-NEXT: .LBB4_5: # %cond_3
-; CHECK-ZICFILP-NEXT: .cfi_restore_state
-; CHECK-ZICFILP-NEXT: beq t1, t2, .LBB4_4
-; CHECK-ZICFILP-NEXT: # %bb.6: # %space
-; CHECK-ZICFILP-NEXT: #APP
-; CHECK-ZICFILP-NEXT: .zero 1048576
-; CHECK-ZICFILP-NEXT: #NO_APP
-; CHECK-ZICFILP-NEXT: # %bb.7: # %space
-; CHECK-ZICFILP-NEXT: sd t2, 0(sp) # 8-byte Folded Spill
-; CHECK-ZICFILP-NEXT: jump .LBB4_8, t2
entry:
%ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
%t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
diff --git a/llvm/test/CodeGen/RISCV/calls.ll b/llvm/test/CodeGen/RISCV/calls.ll
index f30c453d7f6bc..1a3e77b8b8ea1 100644
--- a/llvm/test/CodeGen/RISCV/calls.ll
+++ b/llvm/test/CodeGen/RISCV/calls.ll
@@ -11,8 +11,6 @@
; RUN: | FileCheck -check-prefix=RV64I-MEDIUM %s
; RUN: llc -code-model=large -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I-LARGE %s
-; RUN: llc -code-model=large -mtriple=riscv64 -mattr=experimental-zicfilp -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefix=RV64I-LARGE-ZICFILP %s
declare i32 @external_function(i32)
@@ -64,19 +62,6 @@ define i32 @test_call_external(i32 %a) nounwind {
; RV64I-LARGE-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 16
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_external:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: .Lpcrel_hi0:
-; RV64I-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
-; RV64I-LARGE-ZICFILP-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi0)(a1)
-; RV64I-LARGE-ZICFILP-NEXT: jalr t2
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call i32 @external_function(i32 %a)
ret i32 %1
}
@@ -131,19 +116,6 @@ define i32 @test_call_dso_local(i32 %a) nounwind {
; RV64I-LARGE-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 16
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_dso_local:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: .Lpcrel_hi1:
-; RV64I-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
-; RV64I-LARGE-ZICFILP-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi1)(a1)
-; RV64I-LARGE-ZICFILP-NEXT: jalr t2
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call i32 @dso_local_function(i32 %a)
ret i32 %1
}
@@ -173,12 +145,6 @@ define i32 @defined_function(i32 %a) nounwind {
; RV64I-LARGE: # %bb.0:
; RV64I-LARGE-NEXT: addiw a0, a0, 1
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: defined_function:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addiw a0, a0, 1
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = add i32 %a, 1
ret i32 %1
}
@@ -231,19 +197,6 @@ define i32 @test_call_defined(i32 %a) nounwind {
; RV64I-LARGE-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 16
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_defined:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: .Lpcrel_hi2:
-; RV64I-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI3_0)
-; RV64I-LARGE-ZICFILP-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi2)(a1)
-; RV64I-LARGE-ZICFILP-NEXT: jalr t2
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call i32 @defined_function(i32 %a)
ret i32 %1
}
@@ -303,18 +256,6 @@ define i32 @test_call_indirect(ptr %a, i32 %b) nounwind {
; RV64I-LARGE-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 16
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_indirect:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: mv a2, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a0, a1
-; RV64I-LARGE-ZICFILP-NEXT: jalr a2
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call i32 %a(i32 %b)
ret i32 %1
}
@@ -406,24 +347,6 @@ define i32 @test_call_indirect_no_t0(ptr %a, i32 %b, i32 %c, i32 %d, i32 %e, i32
; RV64I-LARGE-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 16
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_indirect_no_t0:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: mv t1, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a0, a1
-; RV64I-LARGE-ZICFILP-NEXT: mv a1, a2
-; RV64I-LARGE-ZICFILP-NEXT: mv a2, a3
-; RV64I-LARGE-ZICFILP-NEXT: mv a3, a4
-; RV64I-LARGE-ZICFILP-NEXT: mv a4, a5
-; RV64I-LARGE-ZICFILP-NEXT: mv a5, a6
-; RV64I-LARGE-ZICFILP-NEXT: mv a6, a7
-; RV64I-LARGE-ZICFILP-NEXT: jalr t1
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call i32 %a(i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
ret i32 %1
}
@@ -456,12 +379,6 @@ define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind {
; RV64I-LARGE: # %bb.0:
; RV64I-LARGE-NEXT: addw a0, a0, a1
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: fastcc_function:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addw a0, a0, a1
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = add i32 %a, %b
ret i32 %1
}
@@ -535,24 +452,6 @@ define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
; RV64I-LARGE-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 16
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_fastcc:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: mv s0, a0
-; RV64I-LARGE-ZICFILP-NEXT: .Lpcrel_hi3:
-; RV64I-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI7_0)
-; RV64I-LARGE-ZICFILP-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi3)(a0)
-; RV64I-LARGE-ZICFILP-NEXT: mv a0, s0
-; RV64I-LARGE-ZICFILP-NEXT: jalr t2
-; RV64I-LARGE-ZICFILP-NEXT: mv a0, s0
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call fastcc i32 @fastcc_function(i32 %a, i32 %b)
ret i32 %a
}
@@ -673,33 +572,6 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
; RV64I-LARGE-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 32
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_external_many_args:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -32
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: mv s0, a0
-; RV64I-LARGE-ZICFILP-NEXT: .Lpcrel_hi4:
-; RV64I-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI8_0)
-; RV64I-LARGE-ZICFILP-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi4)(a0)
-; RV64I-LARGE-ZICFILP-NEXT: sd s0, 0(sp)
-; RV64I-LARGE-ZICFILP-NEXT: sd s0, 8(sp)
-; RV64I-LARGE-ZICFILP-NEXT: mv a0, s0
-; RV64I-LARGE-ZICFILP-NEXT: mv a1, s0
-; RV64I-LARGE-ZICFILP-NEXT: mv a2, s0
-; RV64I-LARGE-ZICFILP-NEXT: mv a3, s0
-; RV64I-LARGE-ZICFILP-NEXT: mv a4, s0
-; RV64I-LARGE-ZICFILP-NEXT: mv a5, s0
-; RV64I-LARGE-ZICFILP-NEXT: mv a6, s0
-; RV64I-LARGE-ZICFILP-NEXT: mv a7, s0
-; RV64I-LARGE-ZICFILP-NEXT: jalr t2
-; RV64I-LARGE-ZICFILP-NEXT: mv a0, s0
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 32
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call i32 @external_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a,
i32 %a, i32 %a, i32 %a, i32 %a, i32 %a)
ret i32 %a
@@ -735,13 +607,6 @@ define i32 @defined_many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 %
; RV64I-LARGE-NEXT: lw a0, 8(sp)
; RV64I-LARGE-NEXT: addiw a0, a0, 1
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: defined_many_args:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: lw a0, 8(sp)
-; RV64I-LARGE-ZICFILP-NEXT: addiw a0, a0, 1
-; RV64I-LARGE-ZICFILP-NEXT: ret
%added = add i32 %j, 1
ret i32 %added
}
@@ -839,28 +704,6 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
; RV64I-LARGE-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-LARGE-NEXT: addi sp, sp, 32
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: test_call_defined_many_args:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -32
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: .Lpcrel_hi5:
-; RV64I-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI10_0)
-; RV64I-LARGE-ZICFILP-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi5)(a1)
-; RV64I-LARGE-ZICFILP-NEXT: sd a0, 0(sp)
-; RV64I-LARGE-ZICFILP-NEXT: sd a0, 8(sp)
-; RV64I-LARGE-ZICFILP-NEXT: mv a1, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a2, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a3, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a4, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a5, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a6, a0
-; RV64I-LARGE-ZICFILP-NEXT: mv a7, a0
-; RV64I-LARGE-ZICFILP-NEXT: jalr t2
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 32
-; RV64I-LARGE-ZICFILP-NEXT: ret
%1 = call i32 @defined_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a,
i32 %a, i32 %a, i32 %a, i32 %a, i32 %a)
ret i32 %1
@@ -994,35 +837,6 @@ define fastcc void @fastcc_call_nonfastcc(){
; RV64I-LARGE-NEXT: addi sp, sp, 32
; RV64I-LARGE-NEXT: .cfi_def_cfa_offset 0
; RV64I-LARGE-NEXT: ret
-;
-; RV64I-LARGE-ZICFILP-LABEL: fastcc_call_nonfastcc:
-; RV64I-LARGE-ZICFILP: # %bb.0:
-; RV64I-LARGE-ZICFILP-NEXT: lpad 0
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, -32
-; RV64I-LARGE-ZICFILP-NEXT: .cfi_def_cfa_offset 32
-; RV64I-LARGE-ZICFILP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64I-LARGE-ZICFILP-NEXT: .cfi_offset ra, -8
-; RV64I-LARGE-ZICFILP-NEXT: li t0, 10
-; RV64I-LARGE-ZICFILP-NEXT: li t1, 9
-; RV64I-LARGE-ZICFILP-NEXT: .Lpcrel_hi6:
-; RV64I-LARGE-ZICFILP-NEXT: auipc a5, %pcrel_hi(.LCPI11_0)
-; RV64I-LARGE-ZICFILP-NEXT: li a0, 1
-; RV64I-LARGE-ZICFILP-NEXT: li a1, 2
-; RV64I-LARGE-ZICFILP-NEXT: li a2, 3
-; RV64I-LARGE-ZICFILP-NEXT: li a3, 4
-; RV64I-LARGE-ZICFILP-NEXT: li a4, 5
-; RV64I-LARGE-ZICFILP-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi6)(a5)
-; RV64I-LARGE-ZICFILP-NEXT: li a5, 6
-; RV64I-LARGE-ZICFILP-NEXT: li a6, 7
-; RV64I-LARGE-ZICFILP-NEXT: li a7, 8
-; RV64I-LARGE-ZICFILP-NEXT: sd t1, 0(sp)
-; RV64I-LARGE-ZICFILP-NEXT: sd t0, 8(sp)
-; RV64I-LARGE-ZICFILP-NEXT: jalr t2
-; RV64I-LARGE-ZICFILP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64I-LARGE-ZICFILP-NEXT: .cfi_restore ra
-; RV64I-LARGE-ZICFILP-NEXT: addi sp, sp, 32
-; RV64I-LARGE-ZICFILP-NEXT: .cfi_def_cfa_offset 0
-; RV64I-LARGE-ZICFILP-NEXT: ret
call void @external_many_args(i32 1, i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10)
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index b3fa871c859a0..f8398a8d0e765 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -289,6 +289,8 @@
; CHECK-NEXT: ziccif - 'Ziccif' (Main Memory Supports Instruction Fetch with Atomicity Requirement).
; CHECK-NEXT: zicclsm - 'Zicclsm' (Main Memory Supports Misaligned Loads/Stores).
; CHECK-NEXT: ziccrse - 'Ziccrse' (Main Memory Supports Forward Progress on LR/SC Sequences).
+; CHECK-NEXT: zicfilp-func-sig - Enforce forward-edge control-flow integrity with ZICFILP-func-sig.
+; CHECK-NEXT: zicfilp-unlabeled - Enforce forward-edge control-flow integrity with ZICFILP-unlabeled.
; CHECK-NEXT: zicntr - 'Zicntr' (Base Counters and Timers).
; CHECK-NEXT: zicond - 'Zicond' (Integer Conditional Operations).
; CHECK-NEXT: zicsr - 'Zicsr' (CSRs).
diff --git a/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll b/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll
index b4ab0585c0cc9..bd340c617423a 100644
--- a/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll
+++ b/llvm/test/CodeGen/RISCV/jumptable-swguarded.ll
@@ -1,14 +1,11 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple riscv32 -mattr=+experimental-zicfilp < %s | FileCheck %s
-; RUN: llc -mtriple riscv64 -mattr=+experimental-zicfilp < %s | FileCheck %s
-; RUN: llc -mtriple riscv32 < %s | FileCheck %s --check-prefix=NO-ZICFILP
-; RUN: llc -mtriple riscv64 < %s | FileCheck %s --check-prefix=NO-ZICFILP
+; RUN: llc -mtriple riscv32 -mattr=+zicfilp-unlabeled < %s | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+zicfilp-unlabeled < %s | FileCheck %s
; Test using t2 to jump table branch.
define void @above_threshold(i32 signext %in, ptr %out) nounwind {
; CHECK-LABEL: above_threshold:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: li a2, 5
; CHECK-NEXT: bltu a2, a0, .LBB0_9
@@ -40,40 +37,6 @@ define void @above_threshold(i32 signext %in, ptr %out) nounwind {
; CHECK-NEXT: sw a0, 0(a1)
; CHECK-NEXT: .LBB0_9: # %exit
; CHECK-NEXT: ret
-;
-; NO-ZICFILP-LABEL: above_threshold:
-; NO-ZICFILP: # %bb.0: # %entry
-; NO-ZICFILP-NEXT: addi a0, a0, -1
-; NO-ZICFILP-NEXT: li a2, 5
-; NO-ZICFILP-NEXT: bltu a2, a0, .LBB0_9
-; NO-ZICFILP-NEXT: # %bb.1: # %entry
-; NO-ZICFILP-NEXT: slli a0, a0, 2
-; NO-ZICFILP-NEXT: lui a2, %hi(.LJTI0_0)
-; NO-ZICFILP-NEXT: addi a2, a2, %lo(.LJTI0_0)
-; NO-ZICFILP-NEXT: add a0, a2, a0
-; NO-ZICFILP-NEXT: lw a0, 0(a0)
-; NO-ZICFILP-NEXT: jr a0
-; NO-ZICFILP-NEXT: .LBB0_2: # %bb1
-; NO-ZICFILP-NEXT: li a0, 4
-; NO-ZICFILP-NEXT: j .LBB0_8
-; NO-ZICFILP-NEXT: .LBB0_3: # %bb5
-; NO-ZICFILP-NEXT: li a0, 100
-; NO-ZICFILP-NEXT: j .LBB0_8
-; NO-ZICFILP-NEXT: .LBB0_4: # %bb3
-; NO-ZICFILP-NEXT: li a0, 2
-; NO-ZICFILP-NEXT: j .LBB0_8
-; NO-ZICFILP-NEXT: .LBB0_5: # %bb4
-; NO-ZICFILP-NEXT: li a0, 1
-; NO-ZICFILP-NEXT: j .LBB0_8
-; NO-ZICFILP-NEXT: .LBB0_6: # %bb2
-; NO-ZICFILP-NEXT: li a0, 3
-; NO-ZICFILP-NEXT: j .LBB0_8
-; NO-ZICFILP-NEXT: .LBB0_7: # %bb6
-; NO-ZICFILP-NEXT: li a0, 200
-; NO-ZICFILP-NEXT: .LBB0_8: # %exit
-; NO-ZICFILP-NEXT: sw a0, 0(a1)
-; NO-ZICFILP-NEXT: .LBB0_9: # %exit
-; NO-ZICFILP-NEXT: ret
entry:
switch i32 %in, label %exit [
i32 1, label %bb1
@@ -104,3 +67,8 @@ bb6:
exit:
ret void
}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/lpad.ll b/llvm/test/CodeGen/RISCV/lpad.ll
index 28873ab6c49a4..3c4726e897061 100644
--- a/llvm/test/CodeGen/RISCV/lpad.ll
+++ b/llvm/test/CodeGen/RISCV/lpad.ll
@@ -1,17 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple riscv32 -mattr=+experimental-zicfilp < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple riscv64 -mattr=+experimental-zicfilp < %s | FileCheck %s --check-prefixes=CHECK,RV64
-; RUN: llc -mtriple riscv32 -mattr=+experimental-zicfilp \
-; RUN: -riscv-landing-pad-label=1 < %s | FileCheck %s --check-prefixes=FIXED-ONE,FIXED-ONE-RV32
-; RUN: llc -mtriple riscv64 -mattr=+experimental-zicfilp \
-; RUN: -riscv-landing-pad-label=1 < %s | FileCheck %s --check-prefixes=FIXED-ONE,FIXED-ONE-RV64
+; RUN: llc -mtriple riscv32 -mattr=+zicfilp-unlabeled < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple riscv64 -mattr=+zicfilp-unlabeled < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple riscv32 -mattr=+zicfilp-unlabeled -riscv-landing-pad-label=1 < %s | FileCheck %s --check-prefixes=FIXED-ONE,FIXED-ONE-RV32
+; RUN: llc -mtriple riscv64 -mattr=+zicfilp-unlabeled -riscv-landing-pad-label=1 < %s | FileCheck %s --check-prefixes=FIXED-ONE,FIXED-ONE-RV64
; Check indirectbr.
@__const.indirctbr.addr = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@indirctbr, %labelA), ptr blockaddress(@indirctbr, %labelB)], align 8
define void @indirctbr(i32 %i, ptr %p) {
; RV32-LABEL: indirctbr:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: lpad 0
+; RV32-NEXT: auipc zero, 0
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: lui a2, %hi(.L__const.indirctbr.addr)
; RV32-NEXT: addi a2, a2, %lo(.L__const.indirctbr.addr)
@@ -21,20 +19,20 @@ define void @indirctbr(i32 %i, ptr %p) {
; RV32-NEXT: .p2align 2
; RV32-NEXT: .Ltmp3: # Block address taken
; RV32-NEXT: .LBB0_1: # %labelA
-; RV32-NEXT: lpad 0
+; RV32-NEXT: auipc zero, 0
; RV32-NEXT: li a0, 1
; RV32-NEXT: sw a0, 0(a1)
; RV32-NEXT: .p2align 2
; RV32-NEXT: .Ltmp4: # Block address taken
; RV32-NEXT: .LBB0_2: # %labelB
-; RV32-NEXT: lpad 0
+; RV32-NEXT: auipc zero, 0
; RV32-NEXT: li a0, 2
; RV32-NEXT: sw a0, 0(a1)
; RV32-NEXT: ret
;
; RV64-LABEL: indirctbr:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: lpad 0
+; RV64-NEXT: auipc zero, 0
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: lui a2, %hi(.L__const.indirctbr.addr)
@@ -45,20 +43,20 @@ define void @indirctbr(i32 %i, ptr %p) {
; RV64-NEXT: .p2align 2
; RV64-NEXT: .Ltmp3: # Block address taken
; RV64-NEXT: .LBB0_1: # %labelA
-; RV64-NEXT: lpad 0
+; RV64-NEXT: auipc zero, 0
; RV64-NEXT: li a0, 1
; RV64-NEXT: sw a0, 0(a1)
; RV64-NEXT: .p2align 2
; RV64-NEXT: .Ltmp4: # Block address taken
; RV64-NEXT: .LBB0_2: # %labelB
-; RV64-NEXT: lpad 0
+; RV64-NEXT: auipc zero, 0
; RV64-NEXT: li a0, 2
; RV64-NEXT: sw a0, 0(a1)
; RV64-NEXT: ret
;
; FIXED-ONE-RV32-LABEL: indirctbr:
; FIXED-ONE-RV32: # %bb.0: # %entry
-; FIXED-ONE-RV32-NEXT: lpad 1
+; FIXED-ONE-RV32-NEXT: auipc zero, 1
; FIXED-ONE-RV32-NEXT: slli a0, a0, 2
; FIXED-ONE-RV32-NEXT: lui a2, %hi(.L__const.indirctbr.addr)
; FIXED-ONE-RV32-NEXT: addi a2, a2, %lo(.L__const.indirctbr.addr)
@@ -69,20 +67,20 @@ define void @indirctbr(i32 %i, ptr %p) {
; FIXED-ONE-RV32-NEXT: .p2align 2
; FIXED-ONE-RV32-NEXT: .Ltmp3: # Block address taken
; FIXED-ONE-RV32-NEXT: .LBB0_1: # %labelA
-; FIXED-ONE-RV32-NEXT: lpad 1
+; FIXED-ONE-RV32-NEXT: auipc zero, 1
; FIXED-ONE-RV32-NEXT: li a0, 1
; FIXED-ONE-RV32-NEXT: sw a0, 0(a1)
; FIXED-ONE-RV32-NEXT: .p2align 2
; FIXED-ONE-RV32-NEXT: .Ltmp4: # Block address taken
; FIXED-ONE-RV32-NEXT: .LBB0_2: # %labelB
-; FIXED-ONE-RV32-NEXT: lpad 1
+; FIXED-ONE-RV32-NEXT: auipc zero, 1
; FIXED-ONE-RV32-NEXT: li a0, 2
; FIXED-ONE-RV32-NEXT: sw a0, 0(a1)
; FIXED-ONE-RV32-NEXT: ret
;
; FIXED-ONE-RV64-LABEL: indirctbr:
; FIXED-ONE-RV64: # %bb.0: # %entry
-; FIXED-ONE-RV64-NEXT: lpad 1
+; FIXED-ONE-RV64-NEXT: auipc zero, 1
; FIXED-ONE-RV64-NEXT: sext.w a0, a0
; FIXED-ONE-RV64-NEXT: slli a0, a0, 3
; FIXED-ONE-RV64-NEXT: lui a2, %hi(.L__const.indirctbr.addr)
@@ -94,13 +92,13 @@ define void @indirctbr(i32 %i, ptr %p) {
; FIXED-ONE-RV64-NEXT: .p2align 2
; FIXED-ONE-RV64-NEXT: .Ltmp3: # Block address taken
; FIXED-ONE-RV64-NEXT: .LBB0_1: # %labelA
-; FIXED-ONE-RV64-NEXT: lpad 1
+; FIXED-ONE-RV64-NEXT: auipc zero, 1
; FIXED-ONE-RV64-NEXT: li a0, 1
; FIXED-ONE-RV64-NEXT: sw a0, 0(a1)
; FIXED-ONE-RV64-NEXT: .p2align 2
; FIXED-ONE-RV64-NEXT: .Ltmp4: # Block address taken
; FIXED-ONE-RV64-NEXT: .LBB0_2: # %labelB
-; FIXED-ONE-RV64-NEXT: lpad 1
+; FIXED-ONE-RV64-NEXT: auipc zero, 1
; FIXED-ONE-RV64-NEXT: li a0, 2
; FIXED-ONE-RV64-NEXT: sw a0, 0(a1)
; FIXED-ONE-RV64-NEXT: ret
@@ -122,12 +120,12 @@ labelB: ; preds = %labelA, %entry
define void @call(ptr %0) {
; CHECK-LABEL: call:
; CHECK: # %bb.0:
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: jr a0
;
; FIXED-ONE-LABEL: call:
; FIXED-ONE: # %bb.0:
-; FIXED-ONE-NEXT: lpad 1
+; FIXED-ONE-NEXT: auipc zero, 1
; FIXED-ONE-NEXT: lui t2, 1
; FIXED-ONE-NEXT: jr a0
tail call void %0()
@@ -139,15 +137,15 @@ declare dso_local i32 @__gxx_personality_v0(...)
define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; RV32-LABEL: invoke:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: lpad 0
+; RV32-NEXT: auipc zero, 0
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_remember_state
-; RV32-NEXT: .Ltmp0:
+; RV32-NEXT: .Ltmp0: # EH_LABEL
; RV32-NEXT: jalr a0
-; RV32-NEXT: .Ltmp1:
+; RV32-NEXT: .Ltmp1: # EH_LABEL
; RV32-NEXT: .LBB2_1: # %try.cont
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
@@ -156,20 +154,20 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; RV32-NEXT: ret
; RV32-NEXT: .LBB2_2: # %lpad
; RV32-NEXT: .cfi_restore_state
-; RV32-NEXT: .Ltmp2:
+; RV32-NEXT: .Ltmp2: # EH_LABEL
; RV32-NEXT: j .LBB2_1
;
; RV64-LABEL: invoke:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: lpad 0
+; RV64-NEXT: auipc zero, 0
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_remember_state
-; RV64-NEXT: .Ltmp0:
+; RV64-NEXT: .Ltmp0: # EH_LABEL
; RV64-NEXT: jalr a0
-; RV64-NEXT: .Ltmp1:
+; RV64-NEXT: .Ltmp1: # EH_LABEL
; RV64-NEXT: .LBB2_1: # %try.cont
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
@@ -178,21 +176,21 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; RV64-NEXT: ret
; RV64-NEXT: .LBB2_2: # %lpad
; RV64-NEXT: .cfi_restore_state
-; RV64-NEXT: .Ltmp2:
+; RV64-NEXT: .Ltmp2: # EH_LABEL
; RV64-NEXT: j .LBB2_1
;
; FIXED-ONE-RV32-LABEL: invoke:
; FIXED-ONE-RV32: # %bb.0: # %entry
-; FIXED-ONE-RV32-NEXT: lpad 1
+; FIXED-ONE-RV32-NEXT: auipc zero, 1
; FIXED-ONE-RV32-NEXT: addi sp, sp, -16
; FIXED-ONE-RV32-NEXT: .cfi_def_cfa_offset 16
; FIXED-ONE-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; FIXED-ONE-RV32-NEXT: .cfi_offset ra, -4
; FIXED-ONE-RV32-NEXT: .cfi_remember_state
-; FIXED-ONE-RV32-NEXT: .Ltmp0:
+; FIXED-ONE-RV32-NEXT: .Ltmp0: # EH_LABEL
; FIXED-ONE-RV32-NEXT: lui t2, 1
; FIXED-ONE-RV32-NEXT: jalr a0
-; FIXED-ONE-RV32-NEXT: .Ltmp1:
+; FIXED-ONE-RV32-NEXT: .Ltmp1: # EH_LABEL
; FIXED-ONE-RV32-NEXT: .LBB2_1: # %try.cont
; FIXED-ONE-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; FIXED-ONE-RV32-NEXT: .cfi_restore ra
@@ -201,21 +199,21 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; FIXED-ONE-RV32-NEXT: ret
; FIXED-ONE-RV32-NEXT: .LBB2_2: # %lpad
; FIXED-ONE-RV32-NEXT: .cfi_restore_state
-; FIXED-ONE-RV32-NEXT: .Ltmp2:
+; FIXED-ONE-RV32-NEXT: .Ltmp2: # EH_LABEL
; FIXED-ONE-RV32-NEXT: j .LBB2_1
;
; FIXED-ONE-RV64-LABEL: invoke:
; FIXED-ONE-RV64: # %bb.0: # %entry
-; FIXED-ONE-RV64-NEXT: lpad 1
+; FIXED-ONE-RV64-NEXT: auipc zero, 1
; FIXED-ONE-RV64-NEXT: addi sp, sp, -16
; FIXED-ONE-RV64-NEXT: .cfi_def_cfa_offset 16
; FIXED-ONE-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; FIXED-ONE-RV64-NEXT: .cfi_offset ra, -8
; FIXED-ONE-RV64-NEXT: .cfi_remember_state
-; FIXED-ONE-RV64-NEXT: .Ltmp0:
+; FIXED-ONE-RV64-NEXT: .Ltmp0: # EH_LABEL
; FIXED-ONE-RV64-NEXT: lui t2, 1
; FIXED-ONE-RV64-NEXT: jalr a0
-; FIXED-ONE-RV64-NEXT: .Ltmp1:
+; FIXED-ONE-RV64-NEXT: .Ltmp1: # EH_LABEL
; FIXED-ONE-RV64-NEXT: .LBB2_1: # %try.cont
; FIXED-ONE-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; FIXED-ONE-RV64-NEXT: .cfi_restore ra
@@ -224,7 +222,7 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; FIXED-ONE-RV64-NEXT: ret
; FIXED-ONE-RV64-NEXT: .LBB2_2: # %lpad
; FIXED-ONE-RV64-NEXT: .cfi_restore_state
-; FIXED-ONE-RV64-NEXT: .Ltmp2:
+; FIXED-ONE-RV64-NEXT: .Ltmp2: # EH_LABEL
; FIXED-ONE-RV64-NEXT: j .LBB2_1
entry:
invoke void %f() to label %try.cont unwind label %lpad
@@ -241,12 +239,12 @@ try.cont:
define void @external() {
; CHECK-LABEL: external:
; CHECK: # %bb.0:
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: ret
;
; FIXED-ONE-LABEL: external:
; FIXED-ONE: # %bb.0:
-; FIXED-ONE-NEXT: lpad 1
+; FIXED-ONE-NEXT: auipc zero, 1
; FIXED-ONE-NEXT: ret
ret void
}
@@ -268,12 +266,12 @@ define internal void @internal() {
define internal void @internal2() {
; CHECK-LABEL: internal2:
; CHECK: # %bb.0:
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: ret
;
; FIXED-ONE-LABEL: internal2:
; FIXED-ONE: # %bb.0:
-; FIXED-ONE-NEXT: lpad 1
+; FIXED-ONE-NEXT: auipc zero, 1
; FIXED-ONE-NEXT: ret
ret void
}
@@ -295,14 +293,14 @@ declare i32 @setjmp(ptr) returns_twice
define i32 @test_returns_twice() {
; RV32-LABEL: test_returns_twice:
; RV32: # %bb.0:
-; RV32-NEXT: lpad 0
+; RV32-NEXT: auipc zero, 0
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call setjmp
-; RV32-NEXT: lpad 0
+; RV32-NEXT: auipc zero, 0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
@@ -311,14 +309,14 @@ define i32 @test_returns_twice() {
;
; RV64-LABEL: test_returns_twice:
; RV64: # %bb.0:
-; RV64-NEXT: lpad 0
+; RV64-NEXT: auipc zero, 0
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: addi a0, sp, 4
; RV64-NEXT: call setjmp
-; RV64-NEXT: lpad 0
+; RV64-NEXT: auipc zero, 0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
@@ -327,14 +325,14 @@ define i32 @test_returns_twice() {
;
; FIXED-ONE-RV32-LABEL: test_returns_twice:
; FIXED-ONE-RV32: # %bb.0:
-; FIXED-ONE-RV32-NEXT: lpad 1
+; FIXED-ONE-RV32-NEXT: auipc zero, 1
; FIXED-ONE-RV32-NEXT: addi sp, sp, -16
; FIXED-ONE-RV32-NEXT: .cfi_def_cfa_offset 16
; FIXED-ONE-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; FIXED-ONE-RV32-NEXT: .cfi_offset ra, -4
; FIXED-ONE-RV32-NEXT: addi a0, sp, 8
; FIXED-ONE-RV32-NEXT: call setjmp
-; FIXED-ONE-RV32-NEXT: lpad 1
+; FIXED-ONE-RV32-NEXT: auipc zero, 1
; FIXED-ONE-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; FIXED-ONE-RV32-NEXT: .cfi_restore ra
; FIXED-ONE-RV32-NEXT: addi sp, sp, 16
@@ -343,14 +341,14 @@ define i32 @test_returns_twice() {
;
; FIXED-ONE-RV64-LABEL: test_returns_twice:
; FIXED-ONE-RV64: # %bb.0:
-; FIXED-ONE-RV64-NEXT: lpad 1
+; FIXED-ONE-RV64-NEXT: auipc zero, 1
; FIXED-ONE-RV64-NEXT: addi sp, sp, -16
; FIXED-ONE-RV64-NEXT: .cfi_def_cfa_offset 16
; FIXED-ONE-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; FIXED-ONE-RV64-NEXT: .cfi_offset ra, -8
; FIXED-ONE-RV64-NEXT: addi a0, sp, 4
; FIXED-ONE-RV64-NEXT: call setjmp
-; FIXED-ONE-RV64-NEXT: lpad 1
+; FIXED-ONE-RV64-NEXT: auipc zero, 1
; FIXED-ONE-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; FIXED-ONE-RV64-NEXT: .cfi_restore ra
; FIXED-ONE-RV64-NEXT: addi sp, sp, 16
@@ -360,3 +358,8 @@ define i32 @test_returns_twice() {
%call = call i32 @setjmp(ptr %buf)
ret i32 %call
}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/machine-outliner-lpad.ll b/llvm/test/CodeGen/RISCV/machine-outliner-lpad.ll
index a6e019981f26a..1c42f7ff2dbb0 100644
--- a/llvm/test/CodeGen/RISCV/machine-outliner-lpad.ll
+++ b/llvm/test/CodeGen/RISCV/machine-outliner-lpad.ll
@@ -1,11 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple riscv64 -mattr=+experimental-zicfilp < %s | FileCheck %s
-; RUN: llc -mtriple riscv32 -mattr=+experimental-zicfilp < %s | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+zicfilp-unlabeled < %s | FileCheck %s
+; RUN: llc -mtriple riscv32 -mattr=+zicfilp-unlabeled < %s | FileCheck %s
define i16 @test1(i16 %x) #0 {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: tail OUTLINED_FUNCTION_0
entry:
%y = add i16 5, %x
@@ -16,7 +16,7 @@ entry:
define i16 @test2(i16 %x) #0 {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: tail OUTLINED_FUNCTION_0
entry:
%y = add i16 5, %x
@@ -27,7 +27,7 @@ entry:
define i16 @test3(i16 %x) #0 {
; CHECK-LABEL: test3:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: tail OUTLINED_FUNCTION_0
entry:
%y = add i16 5, %x
@@ -38,7 +38,7 @@ entry:
define i16 @test4(i16 %x) #0 {
; CHECK-LABEL: test4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: tail OUTLINED_FUNCTION_0
entry:
%y = add i16 5, %x
@@ -49,7 +49,7 @@ entry:
define i16 @main(i16 %x) #0 {
; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lpad 0
+; CHECK-NEXT: auipc zero, 0
; CHECK-NEXT: tail OUTLINED_FUNCTION_0
entry:
%y = add i16 5, %x
diff --git a/llvm/test/CodeGen/RISCV/nest-register.ll b/llvm/test/CodeGen/RISCV/nest-register.ll
index 6e892e05c4297..e222beee45783 100644
--- a/llvm/test/CodeGen/RISCV/nest-register.ll
+++ b/llvm/test/CodeGen/RISCV/nest-register.ll
@@ -3,10 +3,6 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfilp -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefix=RV64I-ZICFILP %s
-; RUN: not llc -mtriple=riscv64 -target-abi=lp64e -mattr=+experimental-zicfilp \
-; RUN: -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=LP64E-ZICFILP %s
; Tests that the 'nest' parameter attribute causes the relevant parameter to be
; passed in the right register.
@@ -21,12 +17,6 @@ define ptr @nest_receiver(ptr nest %arg) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a0, t2
; RV64I-NEXT: ret
-;
-; RV64I-ZICFILP-LABEL: nest_receiver:
-; RV64I-ZICFILP: # %bb.0:
-; RV64I-ZICFILP-NEXT: lpad 0
-; RV64I-ZICFILP-NEXT: mv a0, t3
-; RV64I-ZICFILP-NEXT: ret
ret ptr %arg
}
@@ -50,22 +40,6 @@ define ptr @nest_caller(ptr %arg) nounwind {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
-;
-; RV64I-ZICFILP-LABEL: nest_caller:
-; RV64I-ZICFILP: # %bb.0:
-; RV64I-ZICFILP-NEXT: lpad 0
-; RV64I-ZICFILP-NEXT: addi sp, sp, -16
-; RV64I-ZICFILP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-ZICFILP-NEXT: mv t3, a0
-; RV64I-ZICFILP-NEXT: call nest_receiver
-; RV64I-ZICFILP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-ZICFILP-NEXT: addi sp, sp, 16
-; RV64I-ZICFILP-NEXT: ret
%result = call ptr @nest_receiver(ptr nest %arg)
ret ptr %result
}
-
-; LP64E-ZICFILP: LLVM ERROR: Nested functions with control flow protection are not usable with ILP32E or LP64E ABI.
-!llvm.module.flags = !{!0}
-
-!0 = !{i32 8, !"cf-protection-branch", i32 1}
diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll
index 06a818516c149..09f04a9b8ccd1 100644
--- a/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll
@@ -1,15 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -O0 -mtriple=riscv64 -mattr=+experimental-zicfilp -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefix=RV64 %s
-; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -mattr=+experimental-zicfilp -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefix=RV64-LINUX %s
+; RUN: llc -O0 -mtriple=riscv64 -mattr=+zicfilp-unlabeled -verify-machineinstrs < %s | FileCheck -check-prefix=RV64 %s
+; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -mattr=+zicfilp-unlabeled -verify-machineinstrs < %s | FileCheck -check-prefix=RV64-LINUX %s
declare i64 @f(ptr nest, i64)
define i64 @test0(i64 %n, ptr %p) nounwind {
; RV64-LABEL: test0:
; RV64: # %bb.0:
-; RV64-NEXT: lpad 0
+; RV64-NEXT: auipc zero, 0
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
@@ -48,7 +46,7 @@ define i64 @test0(i64 %n, ptr %p) nounwind {
;
; RV64-LINUX-LABEL: test0:
; RV64-LINUX: # %bb.0:
-; RV64-LINUX-NEXT: lpad 0
+; RV64-LINUX-NEXT: auipc zero, 0
; RV64-LINUX-NEXT: addi sp, sp, -64
; RV64-LINUX-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-LINUX-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
@@ -92,6 +90,7 @@ define i64 @test0(i64 %n, ptr %p) nounwind {
ret i64 %ret
}
-!llvm.module.flags = !{!0}
+!llvm.module.flags = !{!0, !1}
!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/tail-calls.ll b/llvm/test/CodeGen/RISCV/tail-calls.ll
index 6756fea8a1f85..5d0576ee7f287 100644
--- a/llvm/test/CodeGen/RISCV/tail-calls.ll
+++ b/llvm/test/CodeGen/RISCV/tail-calls.ll
@@ -1,8 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32-unknown-linux-gnu -o - %s | FileCheck %s
-; RUN: llc -mtriple riscv32-unknown-linux-gnu -mattr=experimental-zicfilp \
-; RUN: -code-model=large -o - %s \
-; RUN: | FileCheck %s -check-prefix=CHECK-LARGE-ZICFILP
; RUN: llc -mtriple riscv32-unknown-elf -o - %s | FileCheck %s
; Perform tail call optimization for global address.
@@ -11,14 +8,6 @@ define i32 @caller_tail(i32 %i) nounwind {
; CHECK-LABEL: caller_tail:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: tail callee_tail
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_tail:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi0:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi0)(a1)
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
entry:
%r = tail call i32 @callee_tail(i32 %i)
ret i32 %r
@@ -36,21 +25,6 @@ define void @caller_extern(ptr %src) optsize {
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: tail memcpy
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_extern:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi1:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi2:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a2, %pcrel_hi(.LCPI1_1)
-; CHECK-LARGE-ZICFILP-NEXT: lw a1, %pcrel_lo(.Lpcrel_hi1)(a1)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi2)(a2)
-; CHECK-LARGE-ZICFILP-NEXT: li a2, 7
-; CHECK-LARGE-ZICFILP-NEXT: mv a3, a0
-; CHECK-LARGE-ZICFILP-NEXT: mv a0, a1
-; CHECK-LARGE-ZICFILP-NEXT: mv a1, a3
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
entry:
tail call void @llvm.memcpy.p0.p0.i32(ptr @dest, ptr %src, i32 7, i1 false)
ret void
@@ -68,21 +42,6 @@ define void @caller_extern_pgso(ptr %src) !prof !14 {
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: tail memcpy
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_extern_pgso:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi3:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI2_0)
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi4:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a2, %pcrel_hi(.LCPI2_1)
-; CHECK-LARGE-ZICFILP-NEXT: lw a1, %pcrel_lo(.Lpcrel_hi3)(a1)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi4)(a2)
-; CHECK-LARGE-ZICFILP-NEXT: li a2, 7
-; CHECK-LARGE-ZICFILP-NEXT: mv a3, a0
-; CHECK-LARGE-ZICFILP-NEXT: mv a0, a1
-; CHECK-LARGE-ZICFILP-NEXT: mv a1, a3
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
entry:
tail call void @llvm.memcpy.p0.p0.i32(ptr @dest_pgso, ptr %src, i32 7, i1 false)
ret void
@@ -103,21 +62,6 @@ define void @caller_indirect_tail(i32 %a) nounwind {
; CHECK-NEXT: lui t1, %hi(callee_indirect1)
; CHECK-NEXT: addi t1, t1, %lo(callee_indirect1)
; CHECK-NEXT: jr t1
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_indirect_tail:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: beqz a0, .LBB3_2
-; CHECK-LARGE-ZICFILP-NEXT: # %bb.1: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi6:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t1, %pcrel_lo(.Lpcrel_hi6)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jr t1
-; CHECK-LARGE-ZICFILP-NEXT: .LBB3_2:
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi5:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI3_1)
-; CHECK-LARGE-ZICFILP-NEXT: lw t1, %pcrel_lo(.Lpcrel_hi5)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jr t1
entry:
%tobool = icmp eq i32 %a, 0
%callee = select i1 %tobool, ptr @callee_indirect1, ptr @callee_indirect2
@@ -139,19 +83,6 @@ define i32 @caller_indirect_no_t0(ptr %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5
; CHECK-NEXT: mv a5, a6
; CHECK-NEXT: mv a6, a7
; CHECK-NEXT: jr t1
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_indirect_no_t0:
-; CHECK-LARGE-ZICFILP: # %bb.0:
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: mv t1, a0
-; CHECK-LARGE-ZICFILP-NEXT: mv a0, a1
-; CHECK-LARGE-ZICFILP-NEXT: mv a1, a2
-; CHECK-LARGE-ZICFILP-NEXT: mv a2, a3
-; CHECK-LARGE-ZICFILP-NEXT: mv a3, a4
-; CHECK-LARGE-ZICFILP-NEXT: mv a4, a5
-; CHECK-LARGE-ZICFILP-NEXT: mv a5, a6
-; CHECK-LARGE-ZICFILP-NEXT: mv a6, a7
-; CHECK-LARGE-ZICFILP-NEXT: jr t1
%9 = tail call i32 %0(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7)
ret i32 %9
}
@@ -174,26 +105,6 @@ define void @caller_varargs(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_varargs:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi7:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a2, %pcrel_hi(.LCPI5_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi7)(a2)
-; CHECK-LARGE-ZICFILP-NEXT: sw a0, 0(sp)
-; CHECK-LARGE-ZICFILP-NEXT: mv a2, a1
-; CHECK-LARGE-ZICFILP-NEXT: mv a3, a0
-; CHECK-LARGE-ZICFILP-NEXT: mv a4, a0
-; CHECK-LARGE-ZICFILP-NEXT: mv a5, a1
-; CHECK-LARGE-ZICFILP-NEXT: mv a6, a1
-; CHECK-LARGE-ZICFILP-NEXT: mv a7, a0
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; CHECK-LARGE-ZICFILP-NEXT: ret
entry:
%call = tail call i32 (i32, ...) @callee_varargs(i32 %a, i32 %b, i32 %b, i32 %a, i32 %a, i32 %b, i32 %b, i32 %a, i32 %a)
ret void
@@ -222,31 +133,6 @@ define i32 @caller_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g
; CHECK-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: ret
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_args:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -32
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: lw t0, 32(sp)
-; CHECK-LARGE-ZICFILP-NEXT: lw t1, 36(sp)
-; CHECK-LARGE-ZICFILP-NEXT: lw t3, 40(sp)
-; CHECK-LARGE-ZICFILP-NEXT: lw t4, 44(sp)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, 48(sp)
-; CHECK-LARGE-ZICFILP-NEXT: lw t5, 52(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw t2, 16(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw t5, 20(sp)
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi8:
-; CHECK-LARGE-ZICFILP-NEXT: auipc t2, %pcrel_hi(.LCPI6_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi8)(t2)
-; CHECK-LARGE-ZICFILP-NEXT: sw t0, 0(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw t1, 4(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw t3, 8(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw t4, 12(sp)
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 32
-; CHECK-LARGE-ZICFILP-NEXT: ret
entry:
%r = tail call i32 @callee_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n)
ret i32 %r
@@ -269,25 +155,6 @@ define void @caller_indirect_args() nounwind {
; CHECK-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: ret
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_indirect_args:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -32
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: lui a1, 262128
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi9:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI7_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi9)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: mv a0, sp
-; CHECK-LARGE-ZICFILP-NEXT: sw zero, 0(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw zero, 4(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw zero, 8(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw a1, 12(sp)
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 32
-; CHECK-LARGE-ZICFILP-NEXT: ret
entry:
%call = tail call i32 @callee_indirect_args(fp128 0xL00000000000000003FFF000000000000)
ret void
@@ -299,14 +166,6 @@ define void @caller_weak() nounwind {
; CHECK-LABEL: caller_weak:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: tail callee_weak
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_weak:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi10:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI8_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi10)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
entry:
tail call void @callee_weak()
ret void
@@ -355,48 +214,6 @@ define void @caller_irq() nounwind "interrupt"="machine" {
; CHECK-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 64
; CHECK-NEXT: mret
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_irq:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -64
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi11:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI9_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi11)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 64
-; CHECK-LARGE-ZICFILP-NEXT: mret
entry:
tail call void @callee_irq()
ret void
@@ -418,22 +235,6 @@ define i32 @caller_byval() nounwind {
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_byval:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: lw a0, 8(sp)
-; CHECK-LARGE-ZICFILP-NEXT: sw a0, 4(sp)
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi12:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI10_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi12)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: addi a0, sp, 4
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; CHECK-LARGE-ZICFILP-NEXT: ret
entry:
%a = alloca ptr
%r = tail call i32 @callee_byval(ptr byval(ptr) %a)
@@ -456,22 +257,6 @@ define void @caller_nostruct() nounwind {
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_nostruct:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi13:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI11_0)
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi14:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI11_1)
-; CHECK-LARGE-ZICFILP-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi13)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi14)(a1)
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; CHECK-LARGE-ZICFILP-NEXT: ret
entry:
tail call void @callee_struct(ptr sret(%struct.A) @a)
ret void
@@ -488,19 +273,6 @@ define void @caller_struct(ptr sret(%struct.A) %a) nounwind {
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-;
-; CHECK-LARGE-ZICFILP-LABEL: caller_struct:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi15:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI12_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi15)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; CHECK-LARGE-ZICFILP-NEXT: ret
entry:
tail call void @callee_nostruct()
ret void
@@ -516,19 +288,6 @@ define i32 @disable_tail_calls(i32 %i) nounwind "disable-tail-calls"="true" {
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-;
-; CHECK-LARGE-ZICFILP-LABEL: disable_tail_calls:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, -16
-; CHECK-LARGE-ZICFILP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi16:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a1, %pcrel_hi(.LCPI13_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi16)(a1)
-; CHECK-LARGE-ZICFILP-NEXT: jalr t2
-; CHECK-LARGE-ZICFILP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK-LARGE-ZICFILP-NEXT: addi sp, sp, 16
-; CHECK-LARGE-ZICFILP-NEXT: ret
entry:
%rv = tail call i32 @callee_tail(i32 %i)
ret i32 %rv
@@ -555,35 +314,6 @@ define i32 @duplicate_returns(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: tail test1
; CHECK-NEXT: .LBB14_6: # %if.else8
; CHECK-NEXT: tail test3
-;
-; CHECK-LARGE-ZICFILP-LABEL: duplicate_returns:
-; CHECK-LARGE-ZICFILP: # %bb.0: # %entry
-; CHECK-LARGE-ZICFILP-NEXT: lpad 0
-; CHECK-LARGE-ZICFILP-NEXT: beqz a0, .LBB14_4
-; CHECK-LARGE-ZICFILP-NEXT: # %bb.1: # %if.else
-; CHECK-LARGE-ZICFILP-NEXT: beqz a1, .LBB14_5
-; CHECK-LARGE-ZICFILP-NEXT: # %bb.2: # %if.else4
-; CHECK-LARGE-ZICFILP-NEXT: bge a1, a0, .LBB14_6
-; CHECK-LARGE-ZICFILP-NEXT: # %bb.3: # %if.then6
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi19:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI14_1)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi19)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
-; CHECK-LARGE-ZICFILP-NEXT: .LBB14_4: # %if.then
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi17:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI14_3)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi17)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
-; CHECK-LARGE-ZICFILP-NEXT: .LBB14_5: # %if.then2
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi18:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI14_2)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi18)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
-; CHECK-LARGE-ZICFILP-NEXT: .LBB14_6: # %if.else8
-; CHECK-LARGE-ZICFILP-NEXT: .Lpcrel_hi20:
-; CHECK-LARGE-ZICFILP-NEXT: auipc a0, %pcrel_hi(.LCPI14_0)
-; CHECK-LARGE-ZICFILP-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi20)(a0)
-; CHECK-LARGE-ZICFILP-NEXT: jr t2
entry:
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-disabled-indirect-branch.ll b/llvm/test/CodeGen/RISCV/zicfilp-disabled-indirect-branch.ll
new file mode 100644
index 0000000000000..05982be23931f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zicfilp-disabled-indirect-branch.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mtriple=riscv64 -stop-after=finalize-isel < %s | FileCheck %s
+
+ at brind.arr = internal unnamed_addr constant [2 x ptr] [ptr blockaddress(@brind, %5), ptr blockaddress(@brind, %8)], align 8
+ at x = dso_local global i32 0, align 4
+
+define void @brind(i32 noundef signext %0) {
+ ; CHECK-LABEL: name: brind
+ ; CHECK: PseudoBRIND killed [[VAR:%.*]], 0
+ %2 = sext i32 %0 to i64
+ %3 = getelementptr inbounds [2 x ptr], ptr @brind.arr, i64 0, i64 %2
+ %4 = load ptr, ptr %3, align 8
+ indirectbr ptr %4, [label %5, label %8]
+
+5: ; preds = %1
+ %6 = load i32, ptr @x, align 4
+ %7 = add nsw i32 %6, 2
+ store i32 %7, ptr @x, align 4
+ br label %8
+
+8: ; preds = %5, %1
+ %9 = load i32, ptr @x, align 4
+ %10 = add nsw i32 %9, 1
+ store i32 %10, ptr @x, align 4
+ ret void
+}
+
+define i32 @indirect_call(ptr %0) {
+ ; CHECK-LABEL: name: indirect_call
+ ; CHECK: PseudoCALLIndirect
+ call void %0()
+ ret i32 0
+}
+
+
+define void @indirect_tail(ptr %0) {
+ ; CHECK-LABEL: name: indirect_tail
+ ; CHECK: PseudoTAILIndirect
+ tail call void %0()
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-indirect-branch.ll b/llvm/test/CodeGen/RISCV/zicfilp-indirect-branch.ll
index bccd28ee7e2b3..299bb76d9139a 100644
--- a/llvm/test/CodeGen/RISCV/zicfilp-indirect-branch.ll
+++ b/llvm/test/CodeGen/RISCV/zicfilp-indirect-branch.ll
@@ -1,15 +1,11 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv64 -stop-after=finalize-isel < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfilp -stop-after=finalize-isel < %s | FileCheck -check-prefixes=ZICFILP %s
+; RUN: llc -mtriple=riscv64 -mattr=+zicfilp-unlabeled -stop-after=finalize-isel < %s | FileCheck %s
@brind.arr = internal unnamed_addr constant [2 x ptr] [ptr blockaddress(@brind, %5), ptr blockaddress(@brind, %8)], align 8
@x = dso_local global i32 0, align 4
define void @brind(i32 noundef signext %0) {
; CHECK-LABEL: name: brind
- ; CHECK: PseudoBRIND killed [[VAR:%.*]], 0
- ; ZICFILP-LABEL: name: brind
- ; ZICFILP: PseudoBRINDNonX7 killed [[VAR:%.*]], 0
+ ; CHECK: PseudoBRINDNonX7 killed [[VAR:%.*]], 0
%2 = sext i32 %0 to i64
%3 = getelementptr inbounds [2 x ptr], ptr @brind.arr, i64 0, i64 %2
%4 = load ptr, ptr %3, align 8
@@ -30,9 +26,7 @@ define void @brind(i32 noundef signext %0) {
define i32 @indirect_call(ptr %0) {
; CHECK-LABEL: name: indirect_call
- ; CHECK: PseudoCALLIndirect
- ; ZICFILP-LABEL: name: indirect_call
- ; ZICFILP: PseudoCALLIndirectNonX7
+ ; CHECK: PseudoCALLIndirectNonX7
call void %0()
ret i32 0
}
@@ -40,9 +34,12 @@ define i32 @indirect_call(ptr %0) {
define void @indirect_tail(ptr %0) {
; CHECK-LABEL: name: indirect_tail
- ; CHECK: PseudoTAILIndirect
- ; ZICFILP-LABEL: name: indirect_tail
- ; ZICFILP: PseudoTAILIndirectNonX7
+ ; CHECK: PseudoTAILIndirectNonX7
tail call void %0()
ret void
}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll
new file mode 100644
index 0000000000000..1f5d1c8a8d5e1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll
@@ -0,0 +1,1058 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+zicfilp-unlabeled < %s \
+; RUN: | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: bnez a0, .LBB0_1
+; CHECK-NEXT: j .LBB0_2
+; CHECK-NEXT: .LBB0_1: # %iftrue
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 4096
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: .LBB0_2: # %tail
+; CHECK-NEXT: ret
+ br i1 %a, label %iftrue, label %tail
+
+iftrue:
+ call void asm sideeffect ".space 4096", ""()
+ br label %tail
+
+tail:
+ ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: bnez a0, .LBB1_1
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: jump .LBB1_2, a0
+; CHECK-NEXT: .LBB1_1: # %iftrue
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB1_3
+; CHECK-NEXT: .LBB1_2: # %jmp
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: .LBB1_3: # %tail
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ br i1 %a, label %iftrue, label %jmp
+
+jmp:
+ call void asm sideeffect "", ""()
+ br label %tail
+
+iftrue:
+ call void asm sideeffect "", ""()
+ br label %space
+
+space:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %tail
+
+tail:
+ ret i32 1
+}
+
+define void @relax_jal_spill_32() {
+; CHECK-LABEL: relax_jal_spill_32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -64
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -4
+; CHECK-NEXT: .cfi_offset s0, -8
+; CHECK-NEXT: .cfi_offset s1, -12
+; CHECK-NEXT: .cfi_offset s2, -16
+; CHECK-NEXT: .cfi_offset s3, -20
+; CHECK-NEXT: .cfi_offset s4, -24
+; CHECK-NEXT: .cfi_offset s5, -28
+; CHECK-NEXT: .cfi_offset s6, -32
+; CHECK-NEXT: .cfi_offset s7, -36
+; CHECK-NEXT: .cfi_offset s8, -40
+; CHECK-NEXT: .cfi_offset s9, -44
+; CHECK-NEXT: .cfi_offset s10, -48
+; CHECK-NEXT: .cfi_offset s11, -52
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li ra, 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t0, 5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t1, 6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t2, 7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s0, 8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s1, 9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a0, 10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a1, 11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a2, 12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a3, 13
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a4, 14
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a5, 15
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a6, 16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a7, 17
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s2, 18
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s3, 19
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s4, 20
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s5, 21
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s6, 22
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s7, 23
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s8, 24
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s9, 25
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s10, 26
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s11, 27
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t3, 28
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t4, 29
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t5, 30
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t6, 31
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: beq t5, t6, .LBB2_1
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
+; CHECK-NEXT: jump .LBB2_4, s11
+; CHECK-NEXT: .LBB2_1: # %branch_1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB2_2
+; CHECK-NEXT: .LBB2_4: # %branch_2
+; CHECK-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; CHECK-NEXT: .LBB2_2: # %branch_2
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use ra
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: .cfi_restore s0
+; CHECK-NEXT: .cfi_restore s1
+; CHECK-NEXT: .cfi_restore s2
+; CHECK-NEXT: .cfi_restore s3
+; CHECK-NEXT: .cfi_restore s4
+; CHECK-NEXT: .cfi_restore s5
+; CHECK-NEXT: .cfi_restore s6
+; CHECK-NEXT: .cfi_restore s7
+; CHECK-NEXT: .cfi_restore s8
+; CHECK-NEXT: .cfi_restore s9
+; CHECK-NEXT: .cfi_restore s10
+; CHECK-NEXT: .cfi_restore s11
+; CHECK-NEXT: addi sp, sp, 64
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+ %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+ %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+ %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+ %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+ %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+ %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+ %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+ %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+ %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+ %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+ %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+ %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+ %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+ %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+ %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+ %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+ %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+ %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+ %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+ %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+ %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+ %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+ %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+ %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+ %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+ %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+ %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+ %cmp = icmp eq i32 %t5, %t6
+ br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %branch_2
+
+branch_2:
+ call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+ call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+ call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+ call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+ call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+ call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+ call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+ call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+ call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+ call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+ call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+ call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+ call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+ call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+ call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+ call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+ call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+ call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+ call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+ call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+ call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+ call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+ call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+ call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+ call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+ call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+ call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+ call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+ ret void
+}
+
+define void @relax_jal_spill_32_adjust_spill_slot() {
+ ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+ ; is out the range of 12-bit signed integer, check whether the spill slot is
+ ; adjusted to close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_32_adjust_spill_slot:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -2032
+; CHECK-NEXT: .cfi_def_cfa_offset 2032
+; CHECK-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s0, 2024(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s1, 2020(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s2, 2016(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s3, 2012(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s4, 2008(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s5, 2004(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s6, 2000(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s7, 1996(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s8, 1992(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s9, 1988(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s10, 1984(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s11, 1980(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -4
+; CHECK-NEXT: .cfi_offset s0, -8
+; CHECK-NEXT: .cfi_offset s1, -12
+; CHECK-NEXT: .cfi_offset s2, -16
+; CHECK-NEXT: .cfi_offset s3, -20
+; CHECK-NEXT: .cfi_offset s4, -24
+; CHECK-NEXT: .cfi_offset s5, -28
+; CHECK-NEXT: .cfi_offset s6, -32
+; CHECK-NEXT: .cfi_offset s7, -36
+; CHECK-NEXT: .cfi_offset s8, -40
+; CHECK-NEXT: .cfi_offset s9, -44
+; CHECK-NEXT: .cfi_offset s10, -48
+; CHECK-NEXT: .cfi_offset s11, -52
+; CHECK-NEXT: addi s0, sp, 2032
+; CHECK-NEXT: .cfi_def_cfa s0, 0
+; CHECK-NEXT: lui a0, 2
+; CHECK-NEXT: addi a0, a0, -2032
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: srli a0, sp, 12
+; CHECK-NEXT: slli sp, a0, 12
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li ra, 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t0, 5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t1, 6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t2, 7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s0, 8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s1, 9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a0, 10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a1, 11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a2, 12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a3, 13
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a4, 14
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a5, 15
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a6, 16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a7, 17
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s2, 18
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s3, 19
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s4, 20
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s5, 21
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s6, 22
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s7, 23
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s8, 24
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s9, 25
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s10, 26
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s11, 27
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t3, 28
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t4, 29
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t5, 30
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t6, 31
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: beq t5, t6, .LBB3_1
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
+; CHECK-NEXT: jump .LBB3_4, s11
+; CHECK-NEXT: .LBB3_1: # %branch_1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB3_2
+; CHECK-NEXT: .LBB3_4: # %branch_2
+; CHECK-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; CHECK-NEXT: .LBB3_2: # %branch_2
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use ra
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: addi sp, s0, -2032
+; CHECK-NEXT: .cfi_def_cfa sp, 2032
+; CHECK-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s2, 2016(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s3, 2012(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s4, 2008(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s5, 2004(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s6, 2000(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s7, 1996(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s8, 1992(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s9, 1988(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s10, 1984(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s11, 1980(sp) # 4-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: .cfi_restore s0
+; CHECK-NEXT: .cfi_restore s1
+; CHECK-NEXT: .cfi_restore s2
+; CHECK-NEXT: .cfi_restore s3
+; CHECK-NEXT: .cfi_restore s4
+; CHECK-NEXT: .cfi_restore s5
+; CHECK-NEXT: .cfi_restore s6
+; CHECK-NEXT: .cfi_restore s7
+; CHECK-NEXT: .cfi_restore s8
+; CHECK-NEXT: .cfi_restore s9
+; CHECK-NEXT: .cfi_restore s10
+; CHECK-NEXT: .cfi_restore s11
+; CHECK-NEXT: addi sp, sp, 2032
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %stack_obj = alloca i32, align 4096
+
+ %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+ %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+ %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+ %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+ %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+ %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+ %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+ %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+ %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+ %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+ %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+ %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+ %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+ %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+ %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+ %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+ %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+ %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+ %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+ %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+ %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+ %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+ %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+ %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+ %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+ %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+ %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+ %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+ %cmp = icmp eq i32 %t5, %t6
+ br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %branch_2
+
+branch_2:
+ call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+ call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+ call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+ call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+ call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+ call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+ call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+ call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+ call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+ call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+ call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+ call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+ call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+ call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+ call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+ call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+ call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+ call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+ call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+ call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+ call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+ call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+ call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+ call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+ call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+ call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+ call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+ call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+ ret void
+}
+
+define void @relax_jal_spill_32_restore_block_correspondence() {
+; CHECK-LABEL: relax_jal_spill_32_restore_block_correspondence:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -64
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s9, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s10, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s11, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -4
+; CHECK-NEXT: .cfi_offset s0, -8
+; CHECK-NEXT: .cfi_offset s1, -12
+; CHECK-NEXT: .cfi_offset s2, -16
+; CHECK-NEXT: .cfi_offset s3, -20
+; CHECK-NEXT: .cfi_offset s4, -24
+; CHECK-NEXT: .cfi_offset s5, -28
+; CHECK-NEXT: .cfi_offset s6, -32
+; CHECK-NEXT: .cfi_offset s7, -36
+; CHECK-NEXT: .cfi_offset s8, -40
+; CHECK-NEXT: .cfi_offset s9, -44
+; CHECK-NEXT: .cfi_offset s10, -48
+; CHECK-NEXT: .cfi_offset s11, -52
+; CHECK-NEXT: .cfi_remember_state
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li ra, 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t0, 5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t1, 6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t2, 7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s0, 8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s1, 9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a0, 10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a1, 11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a2, 12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a3, 13
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a4, 14
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a5, 15
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a6, 16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a7, 17
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s2, 18
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s3, 19
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s4, 20
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s5, 21
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s6, 22
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s7, 23
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s8, 24
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s9, 25
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s10, 26
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s11, 27
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t3, 28
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t4, 29
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t5, 30
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t6, 31
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bne t5, t6, .LBB4_2
+; CHECK-NEXT: j .LBB4_1
+; CHECK-NEXT: .LBB4_8: # %dest_1
+; CHECK-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; CHECK-NEXT: .LBB4_1: # %dest_1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # dest 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB4_3
+; CHECK-NEXT: .LBB4_2: # %cond_2
+; CHECK-NEXT: bne t3, t4, .LBB4_5
+; CHECK-NEXT: .LBB4_3: # %dest_2
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # dest 2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: .LBB4_4: # %dest_3
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # dest 3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use ra
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s9, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s10, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s11, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: .cfi_restore s0
+; CHECK-NEXT: .cfi_restore s1
+; CHECK-NEXT: .cfi_restore s2
+; CHECK-NEXT: .cfi_restore s3
+; CHECK-NEXT: .cfi_restore s4
+; CHECK-NEXT: .cfi_restore s5
+; CHECK-NEXT: .cfi_restore s6
+; CHECK-NEXT: .cfi_restore s7
+; CHECK-NEXT: .cfi_restore s8
+; CHECK-NEXT: .cfi_restore s9
+; CHECK-NEXT: .cfi_restore s10
+; CHECK-NEXT: .cfi_restore s11
+; CHECK-NEXT: addi sp, sp, 64
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB4_5: # %cond_3
+; CHECK-NEXT: .cfi_restore_state
+; CHECK-NEXT: beq t1, t2, .LBB4_4
+; CHECK-NEXT: # %bb.6: # %space
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: # %bb.7: # %space
+; CHECK-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
+; CHECK-NEXT: jump .LBB4_8, s11
+entry:
+ %ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
+ %t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
+ %t1 = call i32 asm sideeffect "addi t1, x0, 6", "={t1}"()
+ %t2 = call i32 asm sideeffect "addi t2, x0, 7", "={t2}"()
+ %s0 = call i32 asm sideeffect "addi s0, x0, 8", "={s0}"()
+ %s1 = call i32 asm sideeffect "addi s1, x0, 9", "={s1}"()
+ %a0 = call i32 asm sideeffect "addi a0, x0, 10", "={a0}"()
+ %a1 = call i32 asm sideeffect "addi a1, x0, 11", "={a1}"()
+ %a2 = call i32 asm sideeffect "addi a2, x0, 12", "={a2}"()
+ %a3 = call i32 asm sideeffect "addi a3, x0, 13", "={a3}"()
+ %a4 = call i32 asm sideeffect "addi a4, x0, 14", "={a4}"()
+ %a5 = call i32 asm sideeffect "addi a5, x0, 15", "={a5}"()
+ %a6 = call i32 asm sideeffect "addi a6, x0, 16", "={a6}"()
+ %a7 = call i32 asm sideeffect "addi a7, x0, 17", "={a7}"()
+ %s2 = call i32 asm sideeffect "addi s2, x0, 18", "={s2}"()
+ %s3 = call i32 asm sideeffect "addi s3, x0, 19", "={s3}"()
+ %s4 = call i32 asm sideeffect "addi s4, x0, 20", "={s4}"()
+ %s5 = call i32 asm sideeffect "addi s5, x0, 21", "={s5}"()
+ %s6 = call i32 asm sideeffect "addi s6, x0, 22", "={s6}"()
+ %s7 = call i32 asm sideeffect "addi s7, x0, 23", "={s7}"()
+ %s8 = call i32 asm sideeffect "addi s8, x0, 24", "={s8}"()
+ %s9 = call i32 asm sideeffect "addi s9, x0, 25", "={s9}"()
+ %s10 = call i32 asm sideeffect "addi s10, x0, 26", "={s10}"()
+ %s11 = call i32 asm sideeffect "addi s11, x0, 27", "={s11}"()
+ %t3 = call i32 asm sideeffect "addi t3, x0, 28", "={t3}"()
+ %t4 = call i32 asm sideeffect "addi t4, x0, 29", "={t4}"()
+ %t5 = call i32 asm sideeffect "addi t5, x0, 30", "={t5}"()
+ %t6 = call i32 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+ br label %cond_1
+
+cond_1:
+ %cmp1 = icmp eq i32 %t5, %t6
+ br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+ %cmp2 = icmp eq i32 %t3, %t4
+ br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+ %cmp3 = icmp eq i32 %t1, %t2
+ br i1 %cmp3, label %dest_3, label %space
+
+space:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %dest_1
+
+dest_1:
+ call void asm sideeffect "# dest 1", ""()
+ br label %dest_2
+
+dest_2:
+ call void asm sideeffect "# dest 2", ""()
+ br label %dest_3
+
+dest_3:
+ call void asm sideeffect "# dest 3", ""()
+ br label %tail
+
+tail:
+ call void asm sideeffect "# reg use $0", "{ra}"(i32 %ra)
+ call void asm sideeffect "# reg use $0", "{t0}"(i32 %t0)
+ call void asm sideeffect "# reg use $0", "{t1}"(i32 %t1)
+ call void asm sideeffect "# reg use $0", "{t2}"(i32 %t2)
+ call void asm sideeffect "# reg use $0", "{s0}"(i32 %s0)
+ call void asm sideeffect "# reg use $0", "{s1}"(i32 %s1)
+ call void asm sideeffect "# reg use $0", "{a0}"(i32 %a0)
+ call void asm sideeffect "# reg use $0", "{a1}"(i32 %a1)
+ call void asm sideeffect "# reg use $0", "{a2}"(i32 %a2)
+ call void asm sideeffect "# reg use $0", "{a3}"(i32 %a3)
+ call void asm sideeffect "# reg use $0", "{a4}"(i32 %a4)
+ call void asm sideeffect "# reg use $0", "{a5}"(i32 %a5)
+ call void asm sideeffect "# reg use $0", "{a6}"(i32 %a6)
+ call void asm sideeffect "# reg use $0", "{a7}"(i32 %a7)
+ call void asm sideeffect "# reg use $0", "{s2}"(i32 %s2)
+ call void asm sideeffect "# reg use $0", "{s3}"(i32 %s3)
+ call void asm sideeffect "# reg use $0", "{s4}"(i32 %s4)
+ call void asm sideeffect "# reg use $0", "{s5}"(i32 %s5)
+ call void asm sideeffect "# reg use $0", "{s6}"(i32 %s6)
+ call void asm sideeffect "# reg use $0", "{s7}"(i32 %s7)
+ call void asm sideeffect "# reg use $0", "{s8}"(i32 %s8)
+ call void asm sideeffect "# reg use $0", "{s9}"(i32 %s9)
+ call void asm sideeffect "# reg use $0", "{s10}"(i32 %s10)
+ call void asm sideeffect "# reg use $0", "{s11}"(i32 %s11)
+ call void asm sideeffect "# reg use $0", "{t3}"(i32 %t3)
+ call void asm sideeffect "# reg use $0", "{t4}"(i32 %t4)
+ call void asm sideeffect "# reg use $0", "{t5}"(i32 %t5)
+ call void asm sideeffect "# reg use $0", "{t6}"(i32 %t6)
+
+ ret void
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll
new file mode 100644
index 0000000000000..3fb7102477ed5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll
@@ -0,0 +1,1059 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+zicfilp-unlabeled < %s \
+; RUN: | FileCheck %s
+
+define void @relax_bcc(i1 %a) nounwind {
+; CHECK-LABEL: relax_bcc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: bnez a0, .LBB0_1
+; CHECK-NEXT: j .LBB0_2
+; CHECK-NEXT: .LBB0_1: # %iftrue
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 4096
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: .LBB0_2: # %tail
+; CHECK-NEXT: ret
+ br i1 %a, label %iftrue, label %tail
+
+iftrue:
+ call void asm sideeffect ".space 4096", ""()
+ br label %tail
+
+tail:
+ ret void
+}
+
+define i32 @relax_jal(i1 %a) nounwind {
+; CHECK-LABEL: relax_jal:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: bnez a0, .LBB1_1
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: jump .LBB1_2, a0
+; CHECK-NEXT: .LBB1_1: # %iftrue
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB1_3
+; CHECK-NEXT: .LBB1_2: # %jmp
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: .LBB1_3: # %tail
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ br i1 %a, label %iftrue, label %jmp
+
+jmp:
+ call void asm sideeffect "", ""()
+ br label %tail
+
+iftrue:
+ call void asm sideeffect "", ""()
+ br label %space
+
+space:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %tail
+
+tail:
+ ret i32 1
+}
+
+
+define void @relax_jal_spill_64() {
+; CHECK-LABEL: relax_jal_spill_64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -112
+; CHECK-NEXT: .cfi_def_cfa_offset 112
+; CHECK-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: .cfi_offset s0, -16
+; CHECK-NEXT: .cfi_offset s1, -24
+; CHECK-NEXT: .cfi_offset s2, -32
+; CHECK-NEXT: .cfi_offset s3, -40
+; CHECK-NEXT: .cfi_offset s4, -48
+; CHECK-NEXT: .cfi_offset s5, -56
+; CHECK-NEXT: .cfi_offset s6, -64
+; CHECK-NEXT: .cfi_offset s7, -72
+; CHECK-NEXT: .cfi_offset s8, -80
+; CHECK-NEXT: .cfi_offset s9, -88
+; CHECK-NEXT: .cfi_offset s10, -96
+; CHECK-NEXT: .cfi_offset s11, -104
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li ra, 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t0, 5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t1, 6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t2, 7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s0, 8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s1, 9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a0, 10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a1, 11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a2, 12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a3, 13
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a4, 14
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a5, 15
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a6, 16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a7, 17
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s2, 18
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s3, 19
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s4, 20
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s5, 21
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s6, 22
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s7, 23
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s8, 24
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s9, 25
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s10, 26
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s11, 27
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t3, 28
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t4, 29
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t5, 30
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t6, 31
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: beq t5, t6, .LBB2_1
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: jump .LBB2_4, s11
+; CHECK-NEXT: .LBB2_1: # %branch_1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB2_2
+; CHECK-NEXT: .LBB2_4: # %branch_2
+; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: .LBB2_2: # %branch_2
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use ra
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: .cfi_restore s0
+; CHECK-NEXT: .cfi_restore s1
+; CHECK-NEXT: .cfi_restore s2
+; CHECK-NEXT: .cfi_restore s3
+; CHECK-NEXT: .cfi_restore s4
+; CHECK-NEXT: .cfi_restore s5
+; CHECK-NEXT: .cfi_restore s6
+; CHECK-NEXT: .cfi_restore s7
+; CHECK-NEXT: .cfi_restore s8
+; CHECK-NEXT: .cfi_restore s9
+; CHECK-NEXT: .cfi_restore s10
+; CHECK-NEXT: .cfi_restore s11
+; CHECK-NEXT: addi sp, sp, 112
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+ %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+ %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+ %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+ %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+ %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+ %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+ %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+ %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+ %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+ %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+ %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+ %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+ %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+ %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+ %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+ %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+ %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+ %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+ %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+ %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+ %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+ %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+ %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+ %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+ %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+ %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+ %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+ %cmp = icmp eq i64 %t5, %t6
+ br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %branch_2
+
+branch_2:
+ call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+ call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+ call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+ call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+ call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+ call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+ call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+ call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+ call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+ call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+ call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+ call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+ call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+ call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+ call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+ call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+ call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+ call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+ call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+ call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+ call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+ call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+ call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+ call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+ call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+ call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+ call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+ call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+ ret void
+}
+
+define void @relax_jal_spill_64_adjust_spill_slot() {
+ ; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
+ ; is out the range of 12-bit signed integer, check whether the spill slot is
+ ; adjusted to close to the stack base register.
+; CHECK-LABEL: relax_jal_spill_64_adjust_spill_slot:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -2032
+; CHECK-NEXT: .cfi_def_cfa_offset 2032
+; CHECK-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s1, 2008(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s2, 2000(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s3, 1992(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s4, 1984(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s5, 1976(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s6, 1968(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s7, 1960(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s8, 1952(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s9, 1944(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s10, 1936(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s11, 1928(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: .cfi_offset s0, -16
+; CHECK-NEXT: .cfi_offset s1, -24
+; CHECK-NEXT: .cfi_offset s2, -32
+; CHECK-NEXT: .cfi_offset s3, -40
+; CHECK-NEXT: .cfi_offset s4, -48
+; CHECK-NEXT: .cfi_offset s5, -56
+; CHECK-NEXT: .cfi_offset s6, -64
+; CHECK-NEXT: .cfi_offset s7, -72
+; CHECK-NEXT: .cfi_offset s8, -80
+; CHECK-NEXT: .cfi_offset s9, -88
+; CHECK-NEXT: .cfi_offset s10, -96
+; CHECK-NEXT: .cfi_offset s11, -104
+; CHECK-NEXT: addi s0, sp, 2032
+; CHECK-NEXT: .cfi_def_cfa s0, 0
+; CHECK-NEXT: lui a0, 2
+; CHECK-NEXT: addi a0, a0, -2032
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: srli a0, sp, 12
+; CHECK-NEXT: slli sp, a0, 12
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li ra, 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t0, 5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t1, 6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t2, 7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s0, 8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s1, 9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a0, 10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a1, 11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a2, 12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a3, 13
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a4, 14
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a5, 15
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a6, 16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a7, 17
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s2, 18
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s3, 19
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s4, 20
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s5, 21
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s6, 22
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s7, 23
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s8, 24
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s9, 25
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s10, 26
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s11, 27
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t3, 28
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t4, 29
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t5, 30
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t6, 31
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: beq t5, t6, .LBB3_1
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: jump .LBB3_4, s11
+; CHECK-NEXT: .LBB3_1: # %branch_1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB3_2
+; CHECK-NEXT: .LBB3_4: # %branch_2
+; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: .LBB3_2: # %branch_2
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use ra
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: addi sp, s0, -2032
+; CHECK-NEXT: .cfi_def_cfa sp, 2032
+; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s1, 2008(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s2, 2000(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s3, 1992(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s4, 1984(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s5, 1976(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s6, 1968(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s7, 1960(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s8, 1952(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s9, 1944(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s10, 1936(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s11, 1928(sp) # 8-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: .cfi_restore s0
+; CHECK-NEXT: .cfi_restore s1
+; CHECK-NEXT: .cfi_restore s2
+; CHECK-NEXT: .cfi_restore s3
+; CHECK-NEXT: .cfi_restore s4
+; CHECK-NEXT: .cfi_restore s5
+; CHECK-NEXT: .cfi_restore s6
+; CHECK-NEXT: .cfi_restore s7
+; CHECK-NEXT: .cfi_restore s8
+; CHECK-NEXT: .cfi_restore s9
+; CHECK-NEXT: .cfi_restore s10
+; CHECK-NEXT: .cfi_restore s11
+; CHECK-NEXT: addi sp, sp, 2032
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %stack_obj = alloca i64, align 4096
+
+ %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+ %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+ %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+ %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+ %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+ %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+ %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+ %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+ %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+ %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+ %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+ %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+ %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+ %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+ %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+ %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+ %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+ %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+ %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+ %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+ %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+ %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+ %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+ %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+ %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+ %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+ %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+ %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+ %cmp = icmp eq i64 %t5, %t6
+ br i1 %cmp, label %branch_1, label %branch_2
+
+branch_1:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %branch_2
+
+branch_2:
+ call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+ call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+ call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+ call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+ call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+ call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+ call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+ call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+ call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+ call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+ call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+ call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+ call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+ call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+ call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+ call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+ call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+ call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+ call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+ call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+ call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+ call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+ call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+ call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+ call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+ call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+ call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+ call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+ ret void
+}
+
+define void @relax_jal_spill_64_restore_block_correspondence() {
+; CHECK-LABEL: relax_jal_spill_64_restore_block_correspondence:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -112
+; CHECK-NEXT: .cfi_def_cfa_offset 112
+; CHECK-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: .cfi_offset s0, -16
+; CHECK-NEXT: .cfi_offset s1, -24
+; CHECK-NEXT: .cfi_offset s2, -32
+; CHECK-NEXT: .cfi_offset s3, -40
+; CHECK-NEXT: .cfi_offset s4, -48
+; CHECK-NEXT: .cfi_offset s5, -56
+; CHECK-NEXT: .cfi_offset s6, -64
+; CHECK-NEXT: .cfi_offset s7, -72
+; CHECK-NEXT: .cfi_offset s8, -80
+; CHECK-NEXT: .cfi_offset s9, -88
+; CHECK-NEXT: .cfi_offset s10, -96
+; CHECK-NEXT: .cfi_offset s11, -104
+; CHECK-NEXT: .cfi_remember_state
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li ra, 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t0, 5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t1, 6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t2, 7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s0, 8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s1, 9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a0, 10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a1, 11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a2, 12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a3, 13
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a4, 14
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a5, 15
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a6, 16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li a7, 17
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s2, 18
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s3, 19
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s4, 20
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s5, 21
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s6, 22
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s7, 23
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s8, 24
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s9, 25
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s10, 26
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li s11, 27
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t3, 28
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t4, 29
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t5, 30
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: li t6, 31
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bne t5, t6, .LBB4_2
+; CHECK-NEXT: j .LBB4_1
+; CHECK-NEXT: .LBB4_8: # %dest_1
+; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: .LBB4_1: # %dest_1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # dest 1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: j .LBB4_3
+; CHECK-NEXT: .LBB4_2: # %cond_2
+; CHECK-NEXT: bne t3, t4, .LBB4_5
+; CHECK-NEXT: .LBB4_3: # %dest_2
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # dest 2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: .LBB4_4: # %dest_3
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # dest 3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use ra
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a1
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use a7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s7
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use s11
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t3
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t5
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: # reg use t6
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: .cfi_restore s0
+; CHECK-NEXT: .cfi_restore s1
+; CHECK-NEXT: .cfi_restore s2
+; CHECK-NEXT: .cfi_restore s3
+; CHECK-NEXT: .cfi_restore s4
+; CHECK-NEXT: .cfi_restore s5
+; CHECK-NEXT: .cfi_restore s6
+; CHECK-NEXT: .cfi_restore s7
+; CHECK-NEXT: .cfi_restore s8
+; CHECK-NEXT: .cfi_restore s9
+; CHECK-NEXT: .cfi_restore s10
+; CHECK-NEXT: .cfi_restore s11
+; CHECK-NEXT: addi sp, sp, 112
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB4_5: # %cond_3
+; CHECK-NEXT: .cfi_restore_state
+; CHECK-NEXT: beq t1, t2, .LBB4_4
+; CHECK-NEXT: # %bb.6: # %space
+; CHECK-NEXT: #APP
+; CHECK-NEXT: .zero 1048576
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: # %bb.7: # %space
+; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: jump .LBB4_8, s11
+entry:
+ %ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
+ %t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
+ %t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
+ %t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
+ %s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
+ %s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
+ %a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
+ %a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
+ %a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
+ %a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
+ %a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
+ %a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
+ %a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
+ %a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
+ %s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
+ %s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
+ %s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
+ %s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
+ %s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
+ %s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
+ %s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
+ %s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
+ %s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
+ %s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
+ %t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
+ %t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
+ %t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
+ %t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
+
+ br label %cond_1
+
+cond_1:
+ %cmp1 = icmp eq i64 %t5, %t6
+ br i1 %cmp1, label %dest_1, label %cond_2
+
+cond_2:
+ %cmp2 = icmp eq i64 %t3, %t4
+ br i1 %cmp2, label %dest_2, label %cond_3
+
+cond_3:
+ %cmp3 = icmp eq i64 %t1, %t2
+ br i1 %cmp3, label %dest_3, label %space
+
+space:
+ call void asm sideeffect ".space 1048576", ""()
+ br label %dest_1
+
+dest_1:
+ call void asm sideeffect "# dest 1", ""()
+ br label %dest_2
+
+dest_2:
+ call void asm sideeffect "# dest 2", ""()
+ br label %dest_3
+
+dest_3:
+ call void asm sideeffect "# dest 3", ""()
+ br label %tail
+
+tail:
+ call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
+ call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
+ call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
+ call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
+ call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
+ call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
+ call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
+ call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
+ call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
+ call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
+ call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
+ call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
+ call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
+ call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
+ call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
+ call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
+ call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
+ call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
+ call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
+ call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
+ call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
+ call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
+ call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
+ call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
+ call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
+ call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
+ call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
+ call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
+
+ ret void
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-calls.ll b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-calls.ll
new file mode 100644
index 0000000000000..9e34f28792c0f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-calls.ll
@@ -0,0 +1,253 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -code-model=large -mtriple=riscv64 -mattr=+zicfilp-unlabeled -verify-machineinstrs < %s \
+; RUN: | FileCheck %s
+
+declare i32 @external_function(i32)
+
+define i32 @test_call_external(i32 %a) nounwind {
+; CHECK-LABEL: test_call_external:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi0:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
+; CHECK-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi0)(a1)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %1 = call i32 @external_function(i32 %a)
+ ret i32 %1
+}
+
+declare dso_local i32 @dso_local_function(i32)
+
+define i32 @test_call_dso_local(i32 %a) nounwind {
+; CHECK-LABEL: test_call_dso_local:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi1:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
+; CHECK-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi1)(a1)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %1 = call i32 @dso_local_function(i32 %a)
+ ret i32 %1
+}
+
+define i32 @defined_function(i32 %a) nounwind {
+; CHECK-LABEL: defined_function:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addiw a0, a0, 1
+; CHECK-NEXT: ret
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
+
+define i32 @test_call_defined(i32 %a) nounwind {
+; CHECK-LABEL: test_call_defined:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi2:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI3_0)
+; CHECK-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi2)(a1)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %1 = call i32 @defined_function(i32 %a)
+ ret i32 %1
+}
+
+define i32 @test_call_indirect(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: test_call_indirect:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: jalr a2
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %1 = call i32 %a(i32 %b)
+ ret i32 %1
+}
+
+; Make sure we don't use t0 as the source for jalr as that is a hint to pop the
+; return address stack on some microarchitectures.
+define i32 @test_call_indirect_no_t0(ptr %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) nounwind {
+; CHECK-LABEL: test_call_indirect_no_t0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: mv t1, a0
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: mv a3, a4
+; CHECK-NEXT: mv a4, a5
+; CHECK-NEXT: mv a5, a6
+; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: jalr t1
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %1 = call i32 %a(i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
+ ret i32 %1
+}
+
+; Ensure that calls to fastcc functions aren't rejected. Such calls may be
+; introduced when compiling with optimisation.
+
+define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: fastcc_function:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addw a0, a0, a1
+; CHECK-NEXT: ret
+ %1 = add i32 %a, %b
+ ret i32 %1
+}
+
+define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: test_call_fastcc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: mv s0, a0
+; CHECK-NEXT: .Lpcrel_hi3:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI7_0)
+; CHECK-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi3)(a0)
+; CHECK-NEXT: mv a0, s0
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: mv a0, s0
+; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %1 = call fastcc i32 @fastcc_function(i32 %a, i32 %b)
+ ret i32 %a
+}
+
+declare i32 @external_many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind
+
+define i32 @test_call_external_many_args(i32 %a) nounwind {
+; CHECK-LABEL: test_call_external_many_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -32
+; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT: mv s0, a0
+; CHECK-NEXT: .Lpcrel_hi4:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI8_0)
+; CHECK-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi4)(a0)
+; CHECK-NEXT: sd s0, 0(sp)
+; CHECK-NEXT: sd s0, 8(sp)
+; CHECK-NEXT: mv a0, s0
+; CHECK-NEXT: mv a1, s0
+; CHECK-NEXT: mv a2, s0
+; CHECK-NEXT: mv a3, s0
+; CHECK-NEXT: mv a4, s0
+; CHECK-NEXT: mv a5, s0
+; CHECK-NEXT: mv a6, s0
+; CHECK-NEXT: mv a7, s0
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: mv a0, s0
+; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 32
+; CHECK-NEXT: ret
+ %1 = call i32 @external_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a,
+ i32 %a, i32 %a, i32 %a, i32 %a, i32 %a)
+ ret i32 %a
+}
+
+define i32 @defined_many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 %j) nounwind {
+; CHECK-LABEL: defined_many_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: lw a0, 8(sp)
+; CHECK-NEXT: addiw a0, a0, 1
+; CHECK-NEXT: ret
+ %added = add i32 %j, 1
+ ret i32 %added
+}
+
+define i32 @test_call_defined_many_args(i32 %a) nounwind {
+; CHECK-LABEL: test_call_defined_many_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -32
+; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi5:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI10_0)
+; CHECK-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi5)(a1)
+; CHECK-NEXT: sd a0, 0(sp)
+; CHECK-NEXT: sd a0, 8(sp)
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: mv a4, a0
+; CHECK-NEXT: mv a5, a0
+; CHECK-NEXT: mv a6, a0
+; CHECK-NEXT: mv a7, a0
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 32
+; CHECK-NEXT: ret
+ %1 = call i32 @defined_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a,
+ i32 %a, i32 %a, i32 %a, i32 %a, i32 %a)
+ ret i32 %1
+}
+
+define fastcc void @fastcc_call_nonfastcc(){
+; CHECK-LABEL: fastcc_call_nonfastcc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -32
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: li t0, 10
+; CHECK-NEXT: li t1, 9
+; CHECK-NEXT: .Lpcrel_hi6:
+; CHECK-NEXT: auipc a5, %pcrel_hi(.LCPI11_0)
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a1, 2
+; CHECK-NEXT: li a2, 3
+; CHECK-NEXT: li a3, 4
+; CHECK-NEXT: li a4, 5
+; CHECK-NEXT: ld t2, %pcrel_lo(.Lpcrel_hi6)(a5)
+; CHECK-NEXT: li a5, 6
+; CHECK-NEXT: li a6, 7
+; CHECK-NEXT: li a7, 8
+; CHECK-NEXT: sd t1, 0(sp)
+; CHECK-NEXT: sd t0, 8(sp)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: addi sp, sp, 32
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ call void @external_many_args(i32 1, i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10)
+ ret void
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-nest-register.ll b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-nest-register.ll
new file mode 100644
index 0000000000000..a90c470ff23a4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-nest-register.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zicfilp-unlabeled -verify-machineinstrs < %s | \
+; RUN: FileCheck -check-prefix=RV64I %s
+; RUN: not llc -mtriple=riscv64 -mattr=+zicfilp-unlabeled -target-abi=lp64e -verify-machineinstrs < %s \
+; RUN: 2>&1 | FileCheck -check-prefix=LP64E %s
+
+; Tests that the 'nest' parameter attribute causes the relevant parameter to be
+; passed in the right register.
+
+define ptr @nest_receiver(ptr nest %arg) nounwind {
+; RV64I-LABEL: nest_receiver:
+; RV64I: # %bb.0:
+; RV64I-NEXT: auipc zero, 0
+; RV64I-NEXT: mv a0, t3
+; RV64I-NEXT: ret
+ ret ptr %arg
+}
+
+define ptr @nest_caller(ptr %arg) nounwind {
+; RV64I-LABEL: nest_caller:
+; RV64I: # %bb.0:
+; RV64I-NEXT: auipc zero, 0
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: mv t3, a0
+; RV64I-NEXT: call nest_receiver
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+ %result = call ptr @nest_receiver(ptr nest %arg)
+ ret ptr %result
+}
+
+; LP64E: LLVM ERROR: Nested functions with control flow protection are not usable with ILP32E or LP64E ABI.
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"cf-protection-branch", i32 1}
+!1 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-tail-calls.ll b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-tail-calls.ll
new file mode 100644
index 0000000000000..d444ef7951d2b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-tail-calls.ll
@@ -0,0 +1,434 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32-unknown-linux-gnu -mattr=+zicfilp-unlabeled -code-model=large -o - %s | FileCheck %s
+
+; Perform tail call optimization for global address.
+declare i32 @callee_tail(i32 %i)
+define i32 @caller_tail(i32 %i) nounwind {
+; CHECK-LABEL: caller_tail:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: .Lpcrel_hi0:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi0)(a1)
+; CHECK-NEXT: jr t2
+entry:
+ %r = tail call i32 @callee_tail(i32 %i)
+ ret i32 %r
+}
+
+; Perform tail call optimization for external symbol.
+ at dest = global [2 x i8] zeroinitializer
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
+define void @caller_extern(ptr %src) optsize {
+; CHECK-LABEL: caller_extern:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: .Lpcrel_hi1:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
+; CHECK-NEXT: .Lpcrel_hi2:
+; CHECK-NEXT: auipc a2, %pcrel_hi(.LCPI1_1)
+; CHECK-NEXT: lw a1, %pcrel_lo(.Lpcrel_hi1)(a1)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi2)(a2)
+; CHECK-NEXT: li a2, 7
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: mv a1, a3
+; CHECK-NEXT: jr t2
+entry:
+ tail call void @llvm.memcpy.p0.p0.i32(ptr @dest, ptr %src, i32 7, i1 false)
+ ret void
+}
+
+; Perform tail call optimization for external symbol.
+ at dest_pgso = global [2 x i8] zeroinitializer
+define void @caller_extern_pgso(ptr %src) !prof !14 {
+; CHECK-LABEL: caller_extern_pgso:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: .Lpcrel_hi3:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI2_0)
+; CHECK-NEXT: .Lpcrel_hi4:
+; CHECK-NEXT: auipc a2, %pcrel_hi(.LCPI2_1)
+; CHECK-NEXT: lw a1, %pcrel_lo(.Lpcrel_hi3)(a1)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi4)(a2)
+; CHECK-NEXT: li a2, 7
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: mv a1, a3
+; CHECK-NEXT: jr t2
+entry:
+ tail call void @llvm.memcpy.p0.p0.i32(ptr @dest_pgso, ptr %src, i32 7, i1 false)
+ ret void
+}
+
+; Perform indirect tail call optimization (for function pointer call).
+declare void @callee_indirect1()
+declare void @callee_indirect2()
+define void @caller_indirect_tail(i32 %a) nounwind {
+; CHECK-LABEL: caller_indirect_tail:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: beqz a0, .LBB3_2
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: .Lpcrel_hi6:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
+; CHECK-NEXT: lw t1, %pcrel_lo(.Lpcrel_hi6)(a0)
+; CHECK-NEXT: jr t1
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: .Lpcrel_hi5:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI3_1)
+; CHECK-NEXT: lw t1, %pcrel_lo(.Lpcrel_hi5)(a0)
+; CHECK-NEXT: jr t1
+entry:
+ %tobool = icmp eq i32 %a, 0
+ %callee = select i1 %tobool, ptr @callee_indirect1, ptr @callee_indirect2
+ tail call void %callee()
+ ret void
+}
+
+; Make sure we don't use t0 as the source for jr as that is a hint to pop the
+; return address stack on some microarchitectures.
+define i32 @caller_indirect_no_t0(ptr %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) {
+; CHECK-LABEL: caller_indirect_no_t0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: mv t1, a0
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: mv a3, a4
+; CHECK-NEXT: mv a4, a5
+; CHECK-NEXT: mv a5, a6
+; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: jr t1
+ %9 = tail call i32 %0(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7)
+ ret i32 %9
+}
+
+; Do not tail call optimize functions with varargs passed by stack.
+declare i32 @callee_varargs(i32, ...)
+define void @caller_varargs(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: caller_varargs:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi7:
+; CHECK-NEXT: auipc a2, %pcrel_hi(.LCPI5_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi7)(a2)
+; CHECK-NEXT: sw a0, 0(sp)
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: mv a4, a0
+; CHECK-NEXT: mv a5, a1
+; CHECK-NEXT: mv a6, a1
+; CHECK-NEXT: mv a7, a0
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+entry:
+ %call = tail call i32 (i32, ...) @callee_varargs(i32 %a, i32 %b, i32 %b, i32 %a, i32 %a, i32 %b, i32 %b, i32 %a, i32 %a)
+ ret void
+}
+
+; Do not tail call optimize if stack is used to pass parameters.
+declare i32 @callee_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n)
+define i32 @caller_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n) nounwind {
+; CHECK-LABEL: caller_args:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -32
+; CHECK-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT: lw t0, 32(sp)
+; CHECK-NEXT: lw t1, 36(sp)
+; CHECK-NEXT: lw t3, 40(sp)
+; CHECK-NEXT: lw t4, 44(sp)
+; CHECK-NEXT: lw t2, 48(sp)
+; CHECK-NEXT: lw t5, 52(sp)
+; CHECK-NEXT: sw t2, 16(sp)
+; CHECK-NEXT: sw t5, 20(sp)
+; CHECK-NEXT: .Lpcrel_hi8:
+; CHECK-NEXT: auipc t2, %pcrel_hi(.LCPI6_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi8)(t2)
+; CHECK-NEXT: sw t0, 0(sp)
+; CHECK-NEXT: sw t1, 4(sp)
+; CHECK-NEXT: sw t3, 8(sp)
+; CHECK-NEXT: sw t4, 12(sp)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 32
+; CHECK-NEXT: ret
+entry:
+ %r = tail call i32 @callee_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n)
+ ret i32 %r
+}
+
+; Do not tail call optimize if parameters need to be passed indirectly.
+declare i32 @callee_indirect_args(fp128 %a)
+define void @caller_indirect_args() nounwind {
+; CHECK-LABEL: caller_indirect_args:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -32
+; CHECK-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT: lui a1, 262128
+; CHECK-NEXT: .Lpcrel_hi9:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI7_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi9)(a0)
+; CHECK-NEXT: mv a0, sp
+; CHECK-NEXT: sw zero, 0(sp)
+; CHECK-NEXT: sw zero, 4(sp)
+; CHECK-NEXT: sw zero, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 32
+; CHECK-NEXT: ret
+entry:
+ %call = tail call i32 @callee_indirect_args(fp128 0xL00000000000000003FFF000000000000)
+ ret void
+}
+
+; Perform tail call optimization for external weak symbol.
+declare extern_weak void @callee_weak()
+define void @caller_weak() nounwind {
+; CHECK-LABEL: caller_weak:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: .Lpcrel_hi10:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI8_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi10)(a0)
+; CHECK-NEXT: jr t2
+entry:
+ tail call void @callee_weak()
+ ret void
+}
+
+; Exception-handling functions need a special set of instructions to indicate a
+; return to the hardware. Tail-calling another function would probably break
+; this.
+declare void @callee_irq()
+define void @caller_irq() nounwind "interrupt"="machine" {
+; CHECK-LABEL: caller_irq:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -64
+; CHECK-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi11:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI9_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi11)(a0)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 64
+; CHECK-NEXT: mret
+entry:
+ tail call void @callee_irq()
+ ret void
+}
+
+; Byval parameters hand the function a pointer directly into the stack area
+; we want to reuse during a tail call. Do not tail call optimize functions with
+; byval parameters.
+declare i32 @callee_byval(ptr byval(ptr) %a)
+define i32 @caller_byval() nounwind {
+; CHECK-LABEL: caller_byval:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: lw a0, 8(sp)
+; CHECK-NEXT: sw a0, 4(sp)
+; CHECK-NEXT: .Lpcrel_hi12:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI10_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi12)(a0)
+; CHECK-NEXT: addi a0, sp, 4
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+entry:
+ %a = alloca ptr
+ %r = tail call i32 @callee_byval(ptr byval(ptr) %a)
+ ret i32 %r
+}
+
+; Do not tail call optimize if callee uses structret semantics.
+%struct.A = type { i32 }
+ at a = global %struct.A zeroinitializer
+
+declare void @callee_struct(ptr sret(%struct.A) %a)
+define void @caller_nostruct() nounwind {
+; CHECK-LABEL: caller_nostruct:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi13:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI11_0)
+; CHECK-NEXT: .Lpcrel_hi14:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI11_1)
+; CHECK-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi13)(a0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi14)(a1)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+entry:
+ tail call void @callee_struct(ptr sret(%struct.A) @a)
+ ret void
+}
+
+; Do not tail call optimize if caller uses structret semantics.
+declare void @callee_nostruct()
+define void @caller_struct(ptr sret(%struct.A) %a) nounwind {
+; CHECK-LABEL: caller_struct:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi15:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI12_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi15)(a0)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+entry:
+ tail call void @callee_nostruct()
+ ret void
+}
+
+; Do not tail call optimize if disabled.
+define i32 @disable_tail_calls(i32 %i) nounwind "disable-tail-calls"="true" {
+; CHECK-LABEL: disable_tail_calls:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .Lpcrel_hi16:
+; CHECK-NEXT: auipc a1, %pcrel_hi(.LCPI13_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi16)(a1)
+; CHECK-NEXT: jalr t2
+; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+entry:
+ %rv = tail call i32 @callee_tail(i32 %i)
+ ret i32 %rv
+}
+
+; Duplicate returns to enable tail call optimizations.
+declare i32 @test()
+declare i32 @test1()
+declare i32 @test2()
+declare i32 @test3()
+define i32 @duplicate_returns(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: duplicate_returns:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: auipc zero, 0
+; CHECK-NEXT: beqz a0, .LBB14_4
+; CHECK-NEXT: # %bb.1: # %if.else
+; CHECK-NEXT: beqz a1, .LBB14_5
+; CHECK-NEXT: # %bb.2: # %if.else4
+; CHECK-NEXT: bge a1, a0, .LBB14_6
+; CHECK-NEXT: # %bb.3: # %if.then6
+; CHECK-NEXT: .Lpcrel_hi19:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI14_1)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi19)(a0)
+; CHECK-NEXT: jr t2
+; CHECK-NEXT: .LBB14_4: # %if.then
+; CHECK-NEXT: .Lpcrel_hi17:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI14_3)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi17)(a0)
+; CHECK-NEXT: jr t2
+; CHECK-NEXT: .LBB14_5: # %if.then2
+; CHECK-NEXT: .Lpcrel_hi18:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI14_2)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi18)(a0)
+; CHECK-NEXT: jr t2
+; CHECK-NEXT: .LBB14_6: # %if.else8
+; CHECK-NEXT: .Lpcrel_hi20:
+; CHECK-NEXT: auipc a0, %pcrel_hi(.LCPI14_0)
+; CHECK-NEXT: lw t2, %pcrel_lo(.Lpcrel_hi20)(a0)
+; CHECK-NEXT: jr t2
+entry:
+ %cmp = icmp eq i32 %a, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ %call = tail call i32 @test()
+ br label %return
+
+if.else: ; preds = %entry
+ %cmp1 = icmp eq i32 %b, 0
+ br i1 %cmp1, label %if.then2, label %if.else4
+
+if.then2: ; preds = %if.else
+ %call3 = tail call i32 @test1()
+ br label %return
+
+if.else4: ; preds = %if.else
+ %cmp5 = icmp sgt i32 %a, %b
+ br i1 %cmp5, label %if.then6, label %if.else8
+
+if.then6: ; preds = %if.else4
+ %call7 = tail call i32 @test2()
+ br label %return
+
+if.else8: ; preds = %if.else4
+ %call9 = tail call i32 @test3()
+ br label %return
+
+return: ; preds = %if.else8, %if.then6, %if.then2, %if.then
+ %retval = phi i32 [ %call, %if.then ], [ %call3, %if.then2 ], [ %call7, %if.then6 ], [ %call9, %if.else8 ]
+ ret i32 %retval
+}
+
+!llvm.module.flags = !{!0, !15, !16}
+!0 = !{i32 1, !"ProfileSummary", !1}
+!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
+!2 = !{!"ProfileFormat", !"InstrProf"}
+!3 = !{!"TotalCount", i64 10000}
+!4 = !{!"MaxCount", i64 10}
+!5 = !{!"MaxInternalCount", i64 1}
+!6 = !{!"MaxFunctionCount", i64 1000}
+!7 = !{!"NumCounts", i64 3}
+!8 = !{!"NumFunctions", i64 3}
+!9 = !{!"DetailedSummary", !10}
+!10 = !{!11, !12, !13}
+!11 = !{i32 10000, i64 100, i32 1}
+!12 = !{i32 999000, i64 100, i32 1}
+!13 = !{i32 999999, i64 1, i32 2}
+!14 = !{!"function_entry_count", i64 0}
+!15 = !{i32 8, !"cf-protection-branch", i32 1}
+!16 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
diff --git a/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-disabled.ll b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-disabled.ll
new file mode 100644
index 0000000000000..45090922eafd0
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-disabled.ll
@@ -0,0 +1,18 @@
+target datalayout = "e-m:e-p:32:32-i64:64-n32-S128"
+target triple = "riscv32-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv32" "target-features"="+32bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"ilp32"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv32i2p1"}
+!4 = !{i32 8, !"SmallDataLimit", i32 0}
diff --git a/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unknown-scheme.ll b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unknown-scheme.ll
new file mode 100644
index 0000000000000..c390edddb0c0a
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unknown-scheme.ll
@@ -0,0 +1,20 @@
+target datalayout = "e-m:e-p:32:32-i64:64-n32-S128"
+target triple = "riscv32-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv32" "target-features"="+32bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"ilp32"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv32i2p1"}
+!4 = !{i32 8, !"cf-protection-branch", i32 1}
+!5 = !{i32 1, !"cf-branch-label-scheme", !"unknown-scheme"}
+!6 = !{i32 8, !"SmallDataLimit", i32 0}
diff --git a/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unlabeled.ll b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unlabeled.ll
new file mode 100644
index 0000000000000..7c6f56422683e
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv32-foo-unlabeled.ll
@@ -0,0 +1,20 @@
+target datalayout = "e-m:e-p:32:32-i64:64-n32-S128"
+target triple = "riscv32-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv32" "target-features"="+32bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"ilp32"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv32i2p1"}
+!4 = !{i32 8, !"cf-protection-branch", i32 1}
+!5 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
+!6 = !{i32 8, !"SmallDataLimit", i32 0}
diff --git a/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-disabled.ll b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-disabled.ll
new file mode 100644
index 0000000000000..e59f8ec41c1a6
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-disabled.ll
@@ -0,0 +1,18 @@
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local signext i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" "target-features"="+64bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"lp64"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv64i2p1"}
+!4 = !{i32 8, !"SmallDataLimit", i32 0}
diff --git a/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unknown-scheme.ll b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unknown-scheme.ll
new file mode 100644
index 0000000000000..f308f10dd718d
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unknown-scheme.ll
@@ -0,0 +1,20 @@
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local signext i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" "target-features"="+64bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"lp64"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv64i2p1"}
+!4 = !{i32 8, !"cf-protection-branch", i32 1}
+!5 = !{i32 1, !"cf-branch-label-scheme", !"unknown-scheme"}
+!6 = !{i32 8, !"SmallDataLimit", i32 0}
diff --git a/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unlabeled.ll b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unlabeled.ll
new file mode 100644
index 0000000000000..0aa7f112ba942
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/Inputs/rv64-foo-unlabeled.ll
@@ -0,0 +1,20 @@
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local signext i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" "target-features"="+64bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"lp64"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv64i2p1"}
+!4 = !{i32 8, !"cf-protection-branch", i32 1}
+!5 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
+!6 = !{i32 8, !"SmallDataLimit", i32 0}
diff --git a/llvm/test/LTO/RISCV/branch-cfi/rv32-unlabeled.ll b/llvm/test/LTO/RISCV/branch-cfi/rv32-unlabeled.ll
new file mode 100644
index 0000000000000..cb29af67b777e
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/rv32-unlabeled.ll
@@ -0,0 +1,43 @@
+; RUN: llvm-as %s -o %t.main.bc
+; RUN: llvm-as %p/Inputs/rv32-foo-unlabeled.ll -o %t.foo.unlabeled.bc
+; RUN: llvm-link %t.main.bc %t.foo.unlabeled.bc -S | FileCheck --check-prefix=UNLABELED %s
+
+; RUN: llvm-as %p/Inputs/rv32-foo-disabled.ll -o %t.foo.disabled.bc
+; RUN: llvm-link %t.main.bc %t.foo.disabled.bc -S | FileCheck --check-prefix=DISABLED %s
+
+; RUN: llvm-as %p/Inputs/rv32-foo-unknown-scheme.ll -o %t.foo.unknown.scheme.bc
+; RUN: not llvm-link %t.main.bc %t.foo.unknown.scheme.bc 2>&1 | FileCheck --check-prefix=SCHEME-CONFLICT %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32-S128"
+target triple = "riscv32-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, ptr %retval, align 4
+ %call = call i32 @foo()
+ ret i32 %call
+}
+
+declare dso_local i32 @foo() #1
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv32" "target-features"="+32bit" }
+attributes #1 = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv32" "target-features"="+32bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"ilp32"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv32i2p1"}
+; UNLABELED-DAG: [[P_FLAG:![0-9]+]] = !{i32 8, !"cf-protection-branch", i32 1}
+; DISABLED-DAG: [[P_FLAG:![0-9]+]] = !{i32 8, !"cf-protection-branch", i32 0}
+!4 = !{i32 8, !"cf-protection-branch", i32 1}
+; UNLABELED-DAG: [[S_FLAG:![0-9]+]] = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
+!5 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
+!6 = !{i32 8, !"SmallDataLimit", i32 0}
+; UNLABELED-DAG: !llvm.module.flags = !{{[{].*}}[[P_FLAG]]{{, .*}}[[S_FLAG]]{{[,}]}}
+; DISABLED-DAG: !llvm.module.flags = !{{[{].*}}[[P_FLAG]]{{[,}]}}
+
+; SCHEME-CONFLICT: error: linking module flags 'cf-branch-label-scheme': IDs have conflicting values: '!"unknown-scheme"' from {{.*}}, and '!"unlabeled"' from llvm-link
diff --git a/llvm/test/LTO/RISCV/branch-cfi/rv64-unlabeled.ll b/llvm/test/LTO/RISCV/branch-cfi/rv64-unlabeled.ll
new file mode 100644
index 0000000000000..b0897a8462319
--- /dev/null
+++ b/llvm/test/LTO/RISCV/branch-cfi/rv64-unlabeled.ll
@@ -0,0 +1,43 @@
+; RUN: llvm-as %s -o %t.main.bc
+; RUN: llvm-as %p/Inputs/rv64-foo-unlabeled.ll -o %t.foo.unlabeled.bc
+; RUN: llvm-link %t.main.bc %t.foo.unlabeled.bc -S | FileCheck --check-prefix=UNLABELED %s
+
+; RUN: llvm-as %p/Inputs/rv64-foo-disabled.ll -o %t.foo.disabled.bc
+; RUN: llvm-link %t.main.bc %t.foo.disabled.bc -S | FileCheck --check-prefix=DISABLED %s
+
+; RUN: llvm-as %p/Inputs/rv64-foo-unknown-scheme.ll -o %t.foo.unknown.scheme.bc
+; RUN: not llvm-link %t.main.bc %t.foo.unknown.scheme.bc 2>&1 | FileCheck --check-prefix=SCHEME-CONFLICT %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone
+define dso_local signext i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, ptr %retval, align 4
+ %call = call signext i32 @foo()
+ ret i32 %call
+}
+
+declare dso_local signext i32 @foo() #1
+
+attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" "target-features"="+64bit" }
+attributes #1 = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" "target-features"="+64bit" }
+
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"target-abi", !"lp64"}
+!2 = !{i32 6, !"riscv-isa", !3}
+!3 = !{!"rv64i2p1"}
+; UNLABELED-DAG: [[P_FLAG:![0-9]+]] = !{i32 8, !"cf-protection-branch", i32 1}
+; DISABLED-DAG: [[P_FLAG:![0-9]+]] = !{i32 8, !"cf-protection-branch", i32 0}
+!4 = !{i32 8, !"cf-protection-branch", i32 1}
+; UNLABELED-DAG: [[S_FLAG:![0-9]+]] = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
+!5 = !{i32 1, !"cf-branch-label-scheme", !"unlabeled"}
+!6 = !{i32 8, !"SmallDataLimit", i32 0}
+; UNLABELED-DAG: !llvm.module.flags = !{{[{].*}}[[P_FLAG]]{{, .*}}[[S_FLAG]]{{[,}]}}
+; DISABLED-DAG: !llvm.module.flags = !{{[{].*}}[[P_FLAG]]{{[,}]}}
+
+; SCHEME-CONFLICT: error: linking module flags 'cf-branch-label-scheme': IDs have conflicting values: '!"unknown-scheme"' from {{.*}}, and '!"unlabeled"' from llvm-link
>From 163bc5fd6b383aa4e81472614511fea9e33076a3 Mon Sep 17 00:00:00 2001
From: Ming-Yi Lai <ming-yi.lai at mediatek.com>
Date: Mon, 8 Dec 2025 11:11:43 +0800
Subject: [PATCH 2/2] fixup: Fix indirect branch insertion and tests
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 4 ++--
...icfilp-unlabeled-branch-relaxation-rv32.ll | 20 +++++++++----------
...icfilp-unlabeled-branch-relaxation-rv64.ll | 20 +++++++++----------
3 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 9fb7ac0573824..c5fec06ea46a4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1373,7 +1373,7 @@ void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
RS->enterBasicBlockEnd(MBB);
const TargetRegisterClass *RC = &RISCV::GPRRegClass;
- if (STI.hasStdExtZicfilp())
+ if (STI.hasZicfilpCFI())
RC = &RISCV::GPRX7RegClass;
Register TmpGPR =
RS->scavengeRegisterBackwards(*RC, MI.getIterator(),
@@ -1387,7 +1387,7 @@ void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
// Pick s11(or s1 for rve) because it doesn't make a difference.
TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
// Force t2 if Zicfilp is on
- if (STI.hasStdExtZicfilp())
+ if (STI.hasZicfilpCFI())
TmpGPR = RISCV::X7;
int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll
index 1f5d1c8a8d5e1..680ec395d61dc 100644
--- a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv32.ll
@@ -33,7 +33,7 @@ define i32 @relax_jal(i1 %a) nounwind {
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: bnez a0, .LBB1_1
; CHECK-NEXT: # %bb.4:
-; CHECK-NEXT: jump .LBB1_2, a0
+; CHECK-NEXT: jump .LBB1_2, t2
; CHECK-NEXT: .LBB1_1: # %iftrue
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
@@ -184,15 +184,15 @@ define void @relax_jal_spill_32() {
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: beq t5, t6, .LBB2_1
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
-; CHECK-NEXT: jump .LBB2_4, s11
+; CHECK-NEXT: sw t2, 0(sp) # 4-byte Folded Spill
+; CHECK-NEXT: jump .LBB2_4, t2
; CHECK-NEXT: .LBB2_1: # %branch_1
; CHECK-NEXT: #APP
; CHECK-NEXT: .zero 1048576
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: j .LBB2_2
; CHECK-NEXT: .LBB2_4: # %branch_2
-; CHECK-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t2, 0(sp) # 4-byte Folded Reload
; CHECK-NEXT: .LBB2_2: # %branch_2
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use ra
@@ -504,15 +504,15 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: beq t5, t6, .LBB3_1
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
-; CHECK-NEXT: jump .LBB3_4, s11
+; CHECK-NEXT: sw t2, 0(sp) # 4-byte Folded Spill
+; CHECK-NEXT: jump .LBB3_4, t2
; CHECK-NEXT: .LBB3_1: # %branch_1
; CHECK-NEXT: #APP
; CHECK-NEXT: .zero 1048576
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: j .LBB3_2
; CHECK-NEXT: .LBB3_4: # %branch_2
-; CHECK-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t2, 0(sp) # 4-byte Folded Reload
; CHECK-NEXT: .LBB3_2: # %branch_2
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use ra
@@ -820,7 +820,7 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
; CHECK-NEXT: bne t5, t6, .LBB4_2
; CHECK-NEXT: j .LBB4_1
; CHECK-NEXT: .LBB4_8: # %dest_1
-; CHECK-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw t2, 0(sp) # 4-byte Folded Reload
; CHECK-NEXT: .LBB4_1: # %dest_1
; CHECK-NEXT: #APP
; CHECK-NEXT: # dest 1
@@ -957,8 +957,8 @@ define void @relax_jal_spill_32_restore_block_correspondence() {
; CHECK-NEXT: .zero 1048576
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: # %bb.7: # %space
-; CHECK-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
-; CHECK-NEXT: jump .LBB4_8, s11
+; CHECK-NEXT: sw t2, 0(sp) # 4-byte Folded Spill
+; CHECK-NEXT: jump .LBB4_8, t2
entry:
%ra = call i32 asm sideeffect "addi ra, x0, 1", "={ra}"()
%t0 = call i32 asm sideeffect "addi t0, x0, 5", "={t0}"()
diff --git a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll
index 3fb7102477ed5..fd9dd3977de16 100644
--- a/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/zicfilp-unlabeled-branch-relaxation-rv64.ll
@@ -33,7 +33,7 @@ define i32 @relax_jal(i1 %a) nounwind {
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: bnez a0, .LBB1_1
; CHECK-NEXT: # %bb.4:
-; CHECK-NEXT: jump .LBB1_2, a0
+; CHECK-NEXT: jump .LBB1_2, t2
; CHECK-NEXT: .LBB1_1: # %iftrue
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
@@ -185,15 +185,15 @@ define void @relax_jal_spill_64() {
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: beq t5, t6, .LBB2_1
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
-; CHECK-NEXT: jump .LBB2_4, s11
+; CHECK-NEXT: sd t2, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: jump .LBB2_4, t2
; CHECK-NEXT: .LBB2_1: # %branch_1
; CHECK-NEXT: #APP
; CHECK-NEXT: .zero 1048576
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: j .LBB2_2
; CHECK-NEXT: .LBB2_4: # %branch_2
-; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld t2, 0(sp) # 8-byte Folded Reload
; CHECK-NEXT: .LBB2_2: # %branch_2
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use ra
@@ -505,15 +505,15 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: beq t5, t6, .LBB3_1
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
-; CHECK-NEXT: jump .LBB3_4, s11
+; CHECK-NEXT: sd t2, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: jump .LBB3_4, t2
; CHECK-NEXT: .LBB3_1: # %branch_1
; CHECK-NEXT: #APP
; CHECK-NEXT: .zero 1048576
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: j .LBB3_2
; CHECK-NEXT: .LBB3_4: # %branch_2
-; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld t2, 0(sp) # 8-byte Folded Reload
; CHECK-NEXT: .LBB3_2: # %branch_2
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use ra
@@ -821,7 +821,7 @@ define void @relax_jal_spill_64_restore_block_correspondence() {
; CHECK-NEXT: bne t5, t6, .LBB4_2
; CHECK-NEXT: j .LBB4_1
; CHECK-NEXT: .LBB4_8: # %dest_1
-; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld t2, 0(sp) # 8-byte Folded Reload
; CHECK-NEXT: .LBB4_1: # %dest_1
; CHECK-NEXT: #APP
; CHECK-NEXT: # dest 1
@@ -958,8 +958,8 @@ define void @relax_jal_spill_64_restore_block_correspondence() {
; CHECK-NEXT: .zero 1048576
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: # %bb.7: # %space
-; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
-; CHECK-NEXT: jump .LBB4_8, s11
+; CHECK-NEXT: sd t2, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: jump .LBB4_8, t2
entry:
%ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
%t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
More information about the llvm-commits
mailing list