[clang] [llvm] [RISCV] RISCV vector calling convention (1/2) (PR #77560)
Brandon Wu via cfe-commits
cfe-commits at lists.llvm.org
Sat Mar 2 07:49:16 PST 2024
https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/77560
>From 21b0d4258d0ac9d1ef43863dd0fbf46ba8591795 Mon Sep 17 00:00:00 2001
From: 4vtomat <brandon.wu at sifive.com>
Date: Wed, 22 Mar 2023 22:58:35 -0700
Subject: [PATCH 01/10] [RISCV] RISCV vector calling convention (1/2)
This implements the vector calling convention based on
https://github.com/riscv-non-isa/riscv-elf-psabi-doc.
The idea is to split the callee-saved registers into "scalar"
and "vector" sets: the "scalar" ones keep the original
save/restore strategy, while the "vector" ones are handled
together with the RVV objects.
The stack layout would be:
|--------------------------| <-- FP
| callee-allocated save |
| area for register varargs|
|--------------------------|
| callee-saved registers | <-- scalar callee-saved
| (scalar) |
|--------------------------|
| RVV alignment padding |
|--------------------------|
| callee-saved registers | <-- vector callee-saved
| (vector) |
|--------------------------|
| RVV objects |
|--------------------------|
| padding before RVV |
|--------------------------|
| scalar local variables |
|--------------------------| <-- BP
| variable size objects |
|--------------------------| <-- SP
Note: This patch doesn't handle "tuple" types, e.g. vint32m1x2;
they will be handled in https://github.com/riscv-non-isa/riscv-elf-psabi-doc (2/2).
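As a quick illustration of the caller-side benefit, here is a minimal
sketch in C distilled from the clang test added below (the external
definition of bar is an assumption): because bar uses riscv_vector_cc,
the value loaded into val may stay in a vector callee-saved register
across the call instead of being spilled by the caller.

  #include <riscv_vector.h>

  // bar preserves v1-v7 and v24-v31 for its caller.
  vint32m1_t __attribute__((riscv_vector_cc)) bar(vint32m1_t input);

  vint32m1_t keep_live_across_call(vint32m1_t input, int32_t *base,
                                   size_t vl) {
    vint32m1_t val = __riscv_vle32_v_i32m1(base, vl); // may stay in a vector CSR
    vint32m1_t ret = bar(input); // no caller-side vector spill needed
    __riscv_vse32_v_i32m1(base, val, vl);
    return ret;
  }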
Differential Revision: https://reviews.llvm.org/D154576
---
clang/include/clang/Basic/Attr.td | 5 +
clang/include/clang/Basic/AttrDocs.td | 10 ++
clang/include/clang/Basic/Specifiers.h | 1 +
clang/lib/AST/ItaniumMangle.cpp | 1 +
clang/lib/AST/Type.cpp | 2 +
clang/lib/AST/TypePrinter.cpp | 6 +
clang/lib/Basic/Targets/RISCV.cpp | 11 ++
clang/lib/Basic/Targets/RISCV.h | 2 +
clang/lib/CodeGen/CGCall.cpp | 4 +
clang/lib/Sema/SemaDeclAttr.cpp | 7 ++
clang/lib/Sema/SemaType.cpp | 5 +-
.../RISCV/riscv-vector-callingconv-llvm-ir.c | 27 +++++
.../RISCV/riscv-vector-callingconv.cpp | 19 +++
llvm/include/llvm/AsmParser/LLToken.h | 1 +
llvm/include/llvm/IR/CallingConv.h | 3 +
llvm/lib/AsmParser/LLLexer.cpp | 1 +
llvm/lib/AsmParser/LLParser.cpp | 2 +
llvm/lib/IR/AsmWriter.cpp | 1 +
llvm/lib/Target/RISCV/RISCVCallingConv.td | 13 ++
llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 112 ++++++++++++++----
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 1 +
llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp | 15 +++
.../CodeGen/RISCV/rvv/callee-saved-regs.ll | 95 +++++++++++++++
23 files changed, 320 insertions(+), 24 deletions(-)
create mode 100644 clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
create mode 100644 clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
create mode 100644 llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index fa191c7378dba4..eb9e7cd3075d04 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -2980,6 +2980,11 @@ def PreserveNone : DeclOrTypeAttr, TargetSpecificAttr<TargetAnyX86> {
let Documentation = [PreserveNoneDocs];
}
+def RISCVVectorCC: DeclOrTypeAttr {
+ let Spellings = [Clang<"riscv_vector_cc">];
+ let Documentation = [RISCVVectorCCDocs];
+}
+
def Target : InheritableAttr {
let Spellings = [GCC<"target">];
let Args = [StringArgument<"featuresStr">];
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index b96fbddd51154c..8f3fd8927a10a9 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -5434,6 +5434,16 @@ for clang builtin functions.
}];
}
+def RISCVVectorCCDocs : Documentation {
+ let Category = DocCatCallingConvs;
+ let Content = [{
+The ``riscv_vector_cc`` attribute can be applied to a function. It marks 15
+registers, namely v1-v7 and v24-v31, as callee-saved. Callers thus don't need
+to save these registers before function calls, and callees need to save them
+only if they use them.
+ }];
+}
+
def PreferredNameDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
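For completeness, since RISCVVectorCC is a DeclOrTypeAttr, the
convention should also be expressible on function pointer types. A
hedged sketch, assuming the usual clang placement rules for
calling-convention type attributes (this exact form is not exercised
by the tests in this patch):

  vint32m1_t __attribute__((riscv_vector_cc)) f(vint32m1_t); // on a declaration
  vint32m1_t (__attribute__((riscv_vector_cc)) *fp)(vint32m1_t) = &f; // on a pointer type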
diff --git a/clang/include/clang/Basic/Specifiers.h b/clang/include/clang/Basic/Specifiers.h
index 8586405825cfe0..6c23fdb7a1dff1 100644
--- a/clang/include/clang/Basic/Specifiers.h
+++ b/clang/include/clang/Basic/Specifiers.h
@@ -296,6 +296,7 @@ namespace clang {
CC_AMDGPUKernelCall, // __attribute__((amdgpu_kernel))
CC_M68kRTD, // __attribute__((m68k_rtd))
CC_PreserveNone, // __attribute__((preserve_none))
+ CC_RISCVVectorCall, // __attribute__((riscv_vector_cc))
};
/// Checks whether the given calling convention supports variadic
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 1b6141580a81d2..eabe31c2a21d08 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -3444,6 +3444,7 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_PreserveAll:
case CC_M68kRTD:
case CC_PreserveNone:
+ case CC_RISCVVectorCall:
// FIXME: we should be mangling all of the above.
return "";
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 78dcd3f4007a5a..0fc18d44ffd802 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -3439,6 +3439,7 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_PreserveAll: return "preserve_all";
case CC_M68kRTD: return "m68k_rtd";
case CC_PreserveNone: return "preserve_none";
+ case CC_RISCVVectorCall: return "riscv_vector_cc";
}
llvm_unreachable("Invalid calling convention.");
@@ -3992,6 +3993,7 @@ bool AttributedType::isCallingConv() const {
case attr::PreserveAll:
case attr::M68kRTD:
case attr::PreserveNone:
+ case attr::RISCVVectorCC:
return true;
}
llvm_unreachable("invalid attr kind");
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 7dcc4348f8e036..136dc894e850eb 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -1070,6 +1070,9 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_PreserveNone:
OS << " __attribute__((preserve_none))";
break;
+ case CC_RISCVVectorCall:
+ OS << "__attribute__((riscv_vector_cc))";
+ break;
}
}
@@ -1917,6 +1920,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::PreserveNone:
OS << "preserve_none";
break;
+ case attr::RISCVVectorCC:
+ OS << "riscv_vector_cc";
+ break;
case attr::NoDeref:
OS << "noderef";
break;
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index a6d4af2b88111a..91cf6c9b49222b 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -467,3 +467,14 @@ ParsedTargetAttr RISCVTargetInfo::parseTargetAttr(StringRef Features) const {
}
return Ret;
}
+
+TargetInfo::CallingConvCheckResult
+RISCVTargetInfo::checkCallingConvention(CallingConv CC) const {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_RISCVVectorCall:
+ return CCCR_OK;
+ }
+}
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index bfbdafb682c851..78580b5b1c1063 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -110,6 +110,8 @@ class RISCVTargetInfo : public TargetInfo {
bool hasBFloat16Type() const override { return true; }
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
+
bool useFP16ConversionIntrinsics() const override {
return false;
}
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index d05cf1c6e1814e..aafdff3e7e53b0 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -74,6 +74,7 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
case CC_PreserveNone: return llvm::CallingConv::PreserveNone;
+ case CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall;
}
}
@@ -260,6 +261,9 @@ static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
if (D->hasAttr<PreserveNoneAttr>())
return CC_PreserveNone;
+ if (D->hasAttr<RISCVVectorCCAttr>())
+ return CC_RISCVVectorCall;
+
return CC_C;
}
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 8a204b1d3b88d9..3d4bfc2e1c3725 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -5250,6 +5250,9 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case ParsedAttr::AT_PreserveNone:
D->addAttr(::new (S.Context) PreserveNoneAttr(S.Context, AL));
return;
+ case ParsedAttr::AT_RISCVVectorCC:
+ D->addAttr(::new (S.Context) RISCVVectorCCAttr(S.Context, AL));
+ return;
default:
llvm_unreachable("unexpected attribute kind");
}
@@ -5454,6 +5457,9 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_PreserveNone:
CC = CC_PreserveNone;
break;
+ case ParsedAttr::AT_RISCVVectorCC:
+ CC = CC_RISCVVectorCall;
+ break;
default: llvm_unreachable("unexpected attribute kind");
}
@@ -9573,6 +9579,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_AMDGPUKernelCall:
case ParsedAttr::AT_M68kRTD:
case ParsedAttr::AT_PreserveNone:
+ case ParsedAttr::AT_RISCVVectorCC:
handleCallConvAttr(S, D, AL);
break;
case ParsedAttr::AT_Suppress:
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 1e43e36016a66f..ab92be71ea486d 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -138,7 +138,8 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_PreserveMost: \
case ParsedAttr::AT_PreserveAll: \
case ParsedAttr::AT_M68kRTD: \
- case ParsedAttr::AT_PreserveNone
+ case ParsedAttr::AT_PreserveNone: \
+ case ParsedAttr::AT_RISCVVectorCC
// Function type attributes.
#define FUNCTION_TYPE_ATTRS_CASELIST \
@@ -7922,6 +7923,8 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<M68kRTDAttr>(Ctx, Attr);
case ParsedAttr::AT_PreserveNone:
return createSimpleAttr<PreserveNoneAttr>(Ctx, Attr);
+ case ParsedAttr::AT_RISCVVectorCC:
+ return createSimpleAttr<RISCVVectorCCAttr>(Ctx, Attr);
}
llvm_unreachable("unexpected attribute kind!");
}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
new file mode 100644
index 00000000000000..a5b59306223dfb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
@@ -0,0 +1,27 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
+
+#include <riscv_vector.h>
+
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @bar
+vint32m1_t __attribute__((riscv_vector_cc)) bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t ret;
+ vint32m1_t val;
+ val = __riscv_vle32_v_i32m1(base, vl);
+ ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
+// CHECK-LLVM: call <vscale x 2 x i32> @baz
+vint32m1_t baz(vint32m1_t input);
+vint32m1_t test_no_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t ret;
+ vint32m1_t val;
+ val = __riscv_vle32_v_i32m1(base, vl);
+ ret = baz(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
new file mode 100644
index 00000000000000..22761146093fde
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
@@ -0,0 +1,19 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 %s -triple riscv64 -target-feature +v -verify
+
+__attribute__((riscv_vector_cc)) int var; // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'int'}}
+
+__attribute__((riscv_vector_cc)) void func();
+__attribute__((riscv_vector_cc(1))) void func_invalid(); // expected-error {{'riscv_vector_cc' attribute takes no arguments}}
+
+void test_no_attribute(int); // expected-note {{previous declaration is here}}
+void __attribute__((riscv_vector_cc)) test_no_attribute(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
+
+class test_cc {
+ __attribute__((riscv_vector_cc)) void member_func();
+};
+
+void test_lambda() {
+ __attribute__((riscv_vector_cc)) auto lambda = []() { // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'auto'}}
+ };
+}
diff --git a/llvm/include/llvm/AsmParser/LLToken.h b/llvm/include/llvm/AsmParser/LLToken.h
index 3c34706ee03e82..37481eb60aa11b 100644
--- a/llvm/include/llvm/AsmParser/LLToken.h
+++ b/llvm/include/llvm/AsmParser/LLToken.h
@@ -180,6 +180,7 @@ enum Kind {
kw_tailcc,
kw_m68k_rtdcc,
kw_graalcc,
+ kw_riscv_vector_cc,
// Attributes:
kw_attributes,
diff --git a/llvm/include/llvm/IR/CallingConv.h b/llvm/include/llvm/IR/CallingConv.h
index ef8aaf52f4e6ac..a05d1a4d587845 100644
--- a/llvm/include/llvm/IR/CallingConv.h
+++ b/llvm/include/llvm/IR/CallingConv.h
@@ -264,6 +264,9 @@ namespace CallingConv {
/// except that the first parameter is mapped to x9.
ARM64EC_Thunk_Native = 109,
+ /// Calling convention used for the RISC-V V-extension.
+ RISCV_VectorCall = 110,
+
/// The highest possible ID. Must be some 2^k - 1.
MaxID = 1023
};
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index 5d8a50eee13068..37df0de1fd7f0b 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -637,6 +637,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(tailcc);
KEYWORD(m68k_rtdcc);
KEYWORD(graalcc);
+ KEYWORD(riscv_vector_cc);
KEYWORD(cc);
KEYWORD(c);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index a91e2f690999e0..8fcc5670810ad2 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -2130,6 +2130,7 @@ void LLParser::parseOptionalDLLStorageClass(unsigned &Res) {
/// ::= 'tailcc'
/// ::= 'm68k_rtdcc'
/// ::= 'graalcc'
+/// ::= 'riscv_vector_cc'
/// ::= 'cc' UINT
///
bool LLParser::parseOptionalCallingConv(unsigned &CC) {
@@ -2200,6 +2201,7 @@ bool LLParser::parseOptionalCallingConv(unsigned &CC) {
case lltok::kw_tailcc: CC = CallingConv::Tail; break;
case lltok::kw_m68k_rtdcc: CC = CallingConv::M68k_RTD; break;
case lltok::kw_graalcc: CC = CallingConv::GRAAL; break;
+ case lltok::kw_riscv_vector_cc: CC = CallingConv::RISCV_VectorCall; break;
case lltok::kw_cc: {
Lex.Lex();
return parseUInt32(CC);
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index d3c64a57f7fdfd..2e5332dfa3f71e 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -363,6 +363,7 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::AMDGPU_KERNEL: Out << "amdgpu_kernel"; break;
case CallingConv::AMDGPU_Gfx: Out << "amdgpu_gfx"; break;
case CallingConv::M68k_RTD: Out << "m68k_rtdcc"; break;
+ case CallingConv::RISCV_VectorCall: Out << "riscv_vector_cc"; break;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index 11b716f20f3716..ad06f477437702 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -26,6 +26,19 @@ def CSR_ILP32D_LP64D
: CalleeSavedRegs<(add CSR_ILP32_LP64,
F8_D, F9_D, (sequence "F%u_D", 18, 27))>;
+defvar CSR_V = (add (sequence "V%u", 1, 7), (sequence "V%u", 24, 31),
+ V2M2, V4M2, V6M2, V24M2, V26M2, V28M2, V30M2,
+ V4M4, V24M4, V28M4, V24M8);
+
+def CSR_ILP32_LP64_V
+ : CalleeSavedRegs<(add CSR_ILP32_LP64, CSR_V)>;
+
+def CSR_ILP32F_LP64F_V
+ : CalleeSavedRegs<(add CSR_ILP32F_LP64F, CSR_V)>;
+
+def CSR_ILP32D_LP64D_V
+ : CalleeSavedRegs<(add CSR_ILP32D_LP64D, CSR_V)>;
+
// Needed for implementation of RISCVRegisterInfo::getNoPreservedMask()
def CSR_NoRegs : CalleeSavedRegs<(add)>;
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 7e3dcb3283caba..0fb607e80efb56 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -388,6 +388,21 @@ getUnmanagedCSI(const MachineFunction &MF,
return NonLibcallCSI;
}
+static SmallVector<CalleeSavedInfo, 8>
+getRVVCalleeSavedInfo(const MachineFunction &MF,
+ const std::vector<CalleeSavedInfo> &CSI) {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ SmallVector<CalleeSavedInfo, 8> RVVCSI;
+
+ for (auto &CS : CSI) {
+ int FI = CS.getFrameIdx();
+ if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector)
+ RVVCSI.push_back(CS);
+ }
+
+ return RVVCSI;
+}
+
void RISCVFrameLowering::adjustStackForRVV(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -588,6 +603,10 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// directives.
for (const auto &Entry : CSI) {
int FrameIdx = Entry.getFrameIdx();
+ if (FrameIdx >= 0 &&
+ MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
+ continue;
+
int64_t Offset = MFI.getObjectOffset(FrameIdx);
Register Reg = Entry.getReg();
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
@@ -724,7 +743,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
const auto &CSI = getUnmanagedCSI(MF, MFI.getCalleeSavedInfo());
- // Skip to before the restores of callee-saved registers
+ // Skip to before the restores of scalar callee-saved registers
// FIXME: assumes exactly one instruction is used to restore each
// callee-saved register.
auto LastFrameDestroy = MBBI;
@@ -1025,15 +1044,24 @@ RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
MachineFrameInfo &MFI = MF.getFrameInfo();
// Create a buffer of RVV objects to allocate.
SmallVector<int, 8> ObjectsToAllocate;
- for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
- unsigned StackID = MFI.getStackID(I);
- if (StackID != TargetStackID::ScalableVector)
- continue;
- if (MFI.isDeadObjectIndex(I))
- continue;
+ auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
+ for (int I = FIBegin, E = FIEnd; I != E; ++I) {
+ unsigned StackID = MFI.getStackID(I);
+ if (StackID != TargetStackID::ScalableVector)
+ continue;
+ if (MFI.isDeadObjectIndex(I))
+ continue;
- ObjectsToAllocate.push_back(I);
- }
+ ObjectsToAllocate.push_back(I);
+ }
+ };
+ // First push the RVV callee-saved objects, then the other RVV stack objects.
+ std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
+ const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
+ if (!RVVCSI.empty())
+ pushRVVObjects(RVVCSI[0].getFrameIdx(),
+ RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
+ pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());
// The minimum alignment is 16 bytes.
Align RVVStackAlign(16);
@@ -1483,13 +1511,19 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
// Manually spill values not spilled by libcall & Push/Pop.
const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
- for (auto &CS : UnmanagedCSI) {
- // Insert the spill to the stack frame.
- Register Reg = CS.getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg), CS.getFrameIdx(),
- RC, TRI, Register());
- }
+ const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
+
+ auto storeRegToStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
+ for (auto &CS : CSInfo) {
+ // Insert the spill to the stack frame.
+ Register Reg = CS.getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg), CS.getFrameIdx(),
+ RC, TRI, Register());
+ }
+ };
+ storeRegToStackSlot(UnmanagedCSI);
+ storeRegToStackSlot(RVVCSI);
return true;
}
@@ -1512,14 +1546,46 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
// first in the epilogue. It increases the opportunity to avoid the
// load-to-use data hazard between loading RA and return by RA.
// loadRegFromStackSlot can insert multiple instructions.
+ //
+ // We change the restore order for the scalar and vector
+ // callee-saved registers from the original layout to the
+ // adjusted one shown below:
+ //
+ // Epilog restore order (original):
+ // ----------------------------
+ // RVV objects
+ // ----------------------------
+ // Callee-saved regs(scalar)
+ // Callee-saved regs(vector)
+ // ----------------------------
+ //
+ // Epilog restore order (after):
+ // ----------------------------
+ // RVV objects
+ // ----------------------------
+ // Callee-saved regs(vector)
+ // Callee-saved regs(scalar)
+ // ----------------------------
+ //
+ // This groups together all the vector registers that need to be
+ // restored, while the return address is still restored first among
+ // the scalar registers, increasing the opportunity to avoid the
+ // load-to-use data hazard between loading RA and returning via RA.
+ // loadRegFromStackSlot can insert multiple instructions.
const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
- for (auto &CS : UnmanagedCSI) {
- Register Reg = CS.getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
- Register());
- assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
- }
+ const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
+
+ auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
+ for (auto &CS : CSInfo) {
+ Register Reg = CS.getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
+ Register());
+ assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
+ }
+ };
+ loadRegFromStackSlot(RVVCSI);
+ loadRegFromStackSlot(UnmanagedCSI);
RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
if (RVFI->isPushable(*MF)) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9ab6895aed521e..c353aa8e27514c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18454,6 +18454,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
case CallingConv::Fast:
case CallingConv::SPIR_KERNEL:
case CallingConv::GRAAL:
+ case CallingConv::RISCV_VectorCall:
break;
case CallingConv::GHC:
if (Subtarget.isRVE())
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index ca519dbc4c0359..6a7e15cb635a78 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -69,6 +69,9 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
: CSR_Interrupt_SaveList;
}
+ bool HasVectorCSR =
+ MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall;
+
switch (Subtarget.getTargetABI()) {
default:
llvm_unreachable("Unrecognized ABI");
@@ -77,12 +80,18 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_ILP32E_LP64E_SaveList;
case RISCVABI::ABI_ILP32:
case RISCVABI::ABI_LP64:
+ if (HasVectorCSR)
+ return CSR_ILP32_LP64_V_SaveList;
return CSR_ILP32_LP64_SaveList;
case RISCVABI::ABI_ILP32F:
case RISCVABI::ABI_LP64F:
+ if (HasVectorCSR)
+ return CSR_ILP32F_LP64F_V_SaveList;
return CSR_ILP32F_LP64F_SaveList;
case RISCVABI::ABI_ILP32D:
case RISCVABI::ABI_LP64D:
+ if (HasVectorCSR)
+ return CSR_ILP32D_LP64D_V_SaveList;
return CSR_ILP32D_LP64D_SaveList;
}
}
@@ -657,12 +666,18 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
return CSR_ILP32E_LP64E_RegMask;
case RISCVABI::ABI_ILP32:
case RISCVABI::ABI_LP64:
+ if (CC == CallingConv::RISCV_VectorCall)
+ return CSR_ILP32_LP64_V_RegMask;
return CSR_ILP32_LP64_RegMask;
case RISCVABI::ABI_ILP32F:
case RISCVABI::ABI_LP64F:
+ if (CC == CallingConv::RISCV_VectorCall)
+ return CSR_ILP32F_LP64F_V_RegMask;
return CSR_ILP32F_LP64F_RegMask;
case RISCVABI::ABI_ILP32D:
case RISCVABI::ABI_LP64D:
+ if (CC == CallingConv::RISCV_VectorCall)
+ return CSR_ILP32D_LP64D_V_RegMask;
return CSR_ILP32D_LP64D_RegMask;
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
new file mode 100644
index 00000000000000..84936d88e1874f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -mattr=+v -O2 < %s \
+; RUN: | FileCheck --check-prefix=SPILL-O2 %s
+
+define <vscale x 1 x i32> @test_vector_std(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_std:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
+
+define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_callee:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs4r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl2r.v v2, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
>From 8fa50a0d728c3c6f7736c5405a5128fb06c4a91f Mon Sep 17 00:00:00 2001
From: 4vtomat <brandon.wu at sifive.com>
Date: Mon, 17 Jul 2023 01:26:27 -0700
Subject: [PATCH 02/10] [RISCV] Add CFI information for vector callee-saved
registers
Currently the CFI offsets for RVV registers are not fully handled;
this patch adds that information so that stack unwinding and
debuggers work correctly with RVV callee-saved stack objects.
Depends On D154576
Differential Revision: https://reviews.llvm.org/D156846
---
llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 144 +++++++++++++++---
llvm/lib/Target/RISCV/RISCVFrameLowering.h | 5 +
.../early-clobber-tied-def-subreg-liveness.ll | 1 +
.../RISCV/intrinsic-cttz-elts-vscale.ll | 1 +
llvm/test/CodeGen/RISCV/pr69586.ll | 1 +
...regalloc-last-chance-recoloring-failure.ll | 2 +
llvm/test/CodeGen/RISCV/rvv-cfi-info.ll | 119 +++++++++++++++
llvm/test/CodeGen/RISCV/rvv/abs-vp.ll | 1 +
.../RISCV/rvv/access-fixed-objects-by-rvv.ll | 1 +
.../RISCV/rvv/addi-scalable-offset.mir | 1 +
.../rvv/alloca-load-store-scalable-array.ll | 1 +
.../rvv/alloca-load-store-scalable-struct.ll | 1 +
.../CodeGen/RISCV/rvv/bitreverse-sdnode.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll | 7 +
llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll | 7 +
.../CodeGen/RISCV/rvv/calling-conv-fastcc.ll | 8 +
llvm/test/CodeGen/RISCV/rvv/calling-conv.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll | 4 +
.../test/CodeGen/RISCV/rvv/emergency-slot.mir | 1 +
llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll | 4 +
.../CodeGen/RISCV/rvv/extractelt-int-rv32.ll | 2 +
.../CodeGen/RISCV/rvv/extractelt-int-rv64.ll | 2 +
.../RISCV/rvv/fixed-vectors-bitreverse-vp.ll | 7 +
.../RISCV/rvv/fixed-vectors-bswap-vp.ll | 7 +
.../RISCV/rvv/fixed-vectors-ceil-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-ctlz-vp.ll | 6 +
.../RISCV/rvv/fixed-vectors-ctpop-vp.ll | 3 +
.../RISCV/rvv/fixed-vectors-cttz-vp.ll | 6 +
.../RISCV/rvv/fixed-vectors-floor-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-fmaximum-vp.ll | 3 +
.../RISCV/rvv/fixed-vectors-fminimum-vp.ll | 3 +
.../RISCV/rvv/fixed-vectors-fp-interleave.ll | 1 +
.../RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll | 126 +++++++++++++++
.../rvv/fixed-vectors-insert-subvector.ll | 2 +
.../RISCV/rvv/fixed-vectors-int-interleave.ll | 1 +
.../rvv/fixed-vectors-interleaved-access.ll | 2 +
.../CodeGen/RISCV/rvv/fixed-vectors-llrint.ll | 8 +
.../RISCV/rvv/fixed-vectors-nearbyint-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-reduction-fp.ll | 8 +
.../RISCV/rvv/fixed-vectors-reduction-int.ll | 4 +
.../RISCV/rvv/fixed-vectors-rint-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-round-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-roundeven-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-roundtozero-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-setcc-fp-vp.ll | 3 +
.../RISCV/rvv/fixed-vectors-setcc-int-vp.ll | 3 +
.../RISCV/rvv/fixed-vectors-trunc-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-vcopysign-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-vfma-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vfmax-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-vfmin-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-vfmuladd-vp.ll | 2 +
.../CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll | 2 +
.../CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll | 2 +
.../CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll | 2 +
.../RISCV/rvv/fixed-vectors-vpmerge.ll | 1 +
.../RISCV/rvv/fixed-vectors-vpscatter.ll | 4 +
.../RISCV/rvv/fixed-vectors-vscale-range.ll | 1 +
.../RISCV/rvv/fixed-vectors-vselect-vp.ll | 4 +
.../CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll | 3 +
.../RISCV/rvv/fixed-vectors-vwmulsu.ll | 3 +
.../CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/floor-vp.ll | 1 +
.../test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll | 8 +
.../test/CodeGen/RISCV/rvv/fminimum-sdnode.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll | 8 +
.../CodeGen/RISCV/rvv/fpclamptosat_vec.ll | 24 +++
llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll | 12 ++
.../CodeGen/RISCV/rvv/get-vlen-debugloc.mir | 1 +
.../RISCV/rvv/large-rvv-stack-size.mir | 1 +
llvm/test/CodeGen/RISCV/rvv/localvar.ll | 8 +
llvm/test/CodeGen/RISCV/rvv/memory-args.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll | 1 +
.../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll | 1 +
.../RISCV/rvv/named-vector-shuffle-reverse.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll | 3 +
.../CodeGen/RISCV/rvv/no-reserved-frame.ll | 1 +
.../CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/rint-vp.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/round-vp.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll | 3 +
.../test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll | 2 +
.../test/CodeGen/RISCV/rvv/strided-vpstore.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll | 2 +
.../RISCV/rvv/vector-deinterleave-load.ll | 1 +
.../CodeGen/RISCV/rvv/vector-deinterleave.ll | 2 +
.../RISCV/rvv/vector-interleave-store.ll | 1 +
.../CodeGen/RISCV/rvv/vector-interleave.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll | 2 +
.../RISCV/rvv/vfmadd-constrained-sdnode.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll | 2 +
.../RISCV/rvv/vfmsub-constrained-sdnode.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll | 2 +
.../RISCV/rvv/vfnmadd-constrained-sdnode.ll | 2 +
.../RISCV/rvv/vfnmsub-constrained-sdnode.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll | 1 +
.../CodeGen/RISCV/rvv/vpscatter-sdnode.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/vpstore.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir | 1 +
121 files changed, 690 insertions(+), 17 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/rvv-cfi-info.ll
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 0fb607e80efb56..a5e8b22227d516 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -435,6 +435,32 @@ void RISCVFrameLowering::adjustStackForRVV(MachineFunction &MF,
Flag, getStackAlign());
}
+static void appendScalableVectorExpression(SmallVectorImpl<char> &Expr,
+ int FixedOffset, int ScalableOffset,
+ unsigned DwarfVlenb,
+ llvm::raw_string_ostream &Comment) {
+ uint8_t buffer[16];
+ if (FixedOffset) {
+ Expr.push_back(dwarf::DW_OP_consts);
+ Expr.append(buffer, buffer + encodeSLEB128(FixedOffset, buffer));
+ Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+ Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(FixedOffset);
+ }
+
+ Expr.push_back((uint8_t)dwarf::DW_OP_consts);
+ Expr.append(buffer, buffer + encodeSLEB128(ScalableOffset, buffer));
+
+ Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
+ Expr.append(buffer, buffer + encodeULEB128(DwarfVlenb, buffer));
+ Expr.push_back(0);
+
+ Expr.push_back((uint8_t)dwarf::DW_OP_mul);
+ Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+
+ Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(ScalableOffset)
+ << " * vlenb";
+}
+
static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
Register Reg,
uint64_t FixedOffset,
@@ -452,29 +478,40 @@ static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
else
Comment << printReg(Reg, &TRI);
- uint8_t buffer[16];
- if (FixedOffset) {
- Expr.push_back(dwarf::DW_OP_consts);
- Expr.append(buffer, buffer + encodeSLEB128(FixedOffset, buffer));
- Expr.push_back((uint8_t)dwarf::DW_OP_plus);
- Comment << " + " << FixedOffset;
- }
+ appendScalableVectorExpression(
+ Expr, FixedOffset, ScalableOffset,
+ TRI.getDwarfRegNum(RISCV::VLENB, true), Comment);
- Expr.push_back((uint8_t)dwarf::DW_OP_consts);
- Expr.append(buffer, buffer + encodeSLEB128(ScalableOffset, buffer));
+ SmallString<64> DefCfaExpr;
+ uint8_t buffer[16];
+ DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
+ DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
+ DefCfaExpr.append(Expr.str());
- unsigned DwarfVlenb = TRI.getDwarfRegNum(RISCV::VLENB, true);
- Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
- Expr.append(buffer, buffer + encodeULEB128(DwarfVlenb, buffer));
- Expr.push_back(0);
+ return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
+ Comment.str());
+}
- Expr.push_back((uint8_t)dwarf::DW_OP_mul);
- Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
+ Register Reg,
+ uint64_t FixedOffset,
+ uint64_t ScalableOffset) {
+ assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
+ SmallString<64> Expr;
+ std::string CommentBuffer;
+ llvm::raw_string_ostream Comment(CommentBuffer);
+ Comment << printReg(Reg, &TRI) << " @ cfa";
- Comment << " + " << ScalableOffset << " * vlenb";
+ // Build up the expression (FixedOffset + ScalableOffset * VLENB).
+ appendScalableVectorExpression(
+ Expr, FixedOffset, ScalableOffset,
+ TRI.getDwarfRegNum(RISCV::VLENB, true), Comment);
SmallString<64> DefCfaExpr;
- DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
+ uint8_t buffer[16];
+ unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
+ DefCfaExpr.push_back(dwarf::DW_CFA_expression);
+ DefCfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
DefCfaExpr.append(Expr.str());
@@ -669,6 +706,9 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
}
+
+ std::advance(MBBI, getRVVCalleeSavedInfo(MF, CSI).size());
+ emitCalleeSavedRVVPrologCFI(MBB, MBBI, hasFP(MF));
}
if (hasFP(MF)) {
@@ -755,6 +795,9 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
uint64_t RVVStackSize = RVFI->getRVVStackSize();
+ if (RVVStackSize)
+ emitCalleeSavedRVVEpilogCFI(MBB, LastFrameDestroy);
+
// Restore the stack pointer using the value of the frame pointer. Only
// necessary if the stack pointer was modified, meaning the stack size is
// unknown.
@@ -777,6 +820,15 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
MachineInstr::FrameDestroy);
}
+ if (RVVStackSize) {
+ unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
+ nullptr, RI->getDwarfRegNum(SPReg, true), RealStackSize));
+ BuildMI(MBB, LastFrameDestroy, DL,
+ STI.getInstrInfo()->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameDestroy);
+ }
+
uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
if (FirstSPAdjustAmount) {
uint64_t SecondSPAdjustAmount =
@@ -1528,6 +1580,64 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
return true;
}
+void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, bool HasFP) const {
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = MF->getFrameInfo();
+ RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
+ if (RVVCSI.empty())
+ return;
+
+ uint64_t FixedSize = getStackSizeWithRVVPadding(*MF) +
+ RVFI->getLibCallStackSize() +
+ RVFI->getRVPushStackSize();
+ if (!HasFP) {
+ uint64_t ScalarLocalVarSize =
+ MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
+ RVFI->getRVPushStackSize() - RVFI->getVarArgsSaveSize() +
+ RVFI->getRVVPadding();
+ FixedSize -= ScalarLocalVarSize;
+ }
+
+ for (auto &CS : RVVCSI) {
+ // Emit the CFI information for this vector callee-saved register's spill slot.
+ int FI = CS.getFrameIdx();
+ if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+ unsigned CFIIndex = MF->addFrameInst(createDefCFAOffset(
+ *STI.getRegisterInfo(), CS.getReg(),
+ -FixedSize, MFI.getObjectOffset(FI) / 8));
+ BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
+}
+
+void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = MF->getFrameInfo();
+ const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+ const TargetInstrInfo &TII = *STI.getInstrInfo();
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
+ if (RVVCSI.empty())
+ return;
+
+ for (const auto &CS : RVVCSI) {
+ unsigned CFIIndex = MF->addFrameInst(MCCFIInstruction::createRestore(
+ nullptr, TRI.getDwarfRegNum(CS.getReg(), true)));
+ BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex)
+ .setMIFlags(MachineInstr::FrameDestroy);
+ }
+}
+
bool RISCVFrameLowering::restoreCalleeSavedRegisters(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
index 210f8c1064724a..1832af8ca0a391 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -88,6 +88,11 @@ class RISCVFrameLowering : public TargetFrameLowering {
void adjustStackForRVV(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
int64_t Amount, MachineInstr::MIFlag Flag) const;
+ void emitCalleeSavedRVVPrologCFI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ bool HasFP) const;
+ void emitCalleeSavedRVVEpilogCFI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const;
std::pair<int64_t, Align>
assignRVVStackObjectOffsets(MachineFunction &MF) const;
};
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 83a4f63add337f..04075a49add37f 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -85,6 +85,7 @@ define void @_Z3foov() {
; CHECK-NEXT: li a1, 10
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index bafa92e06834ac..6614561ec4b5b4 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -104,6 +104,7 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: add sp, sp, a2
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll
index 2d5fce2ca4970e..a5d4809947f4cb 100644
--- a/llvm/test/CodeGen/RISCV/pr69586.ll
+++ b/llvm/test/CodeGen/RISCV/pr69586.ll
@@ -763,6 +763,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: li a1, 6
; NOREMAT-NEXT: mul a0, a0, a1
; NOREMAT-NEXT: add sp, sp, a0
+; NOREMAT-NEXT: .cfi_def_cfa sp, 400
; NOREMAT-NEXT: ld ra, 392(sp) # 8-byte Folded Reload
; NOREMAT-NEXT: ld s0, 384(sp) # 8-byte Folded Reload
; NOREMAT-NEXT: ld s1, 376(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 3ce56318426ad2..a397561724330d 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -72,6 +72,7 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 32
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 32
@@ -133,6 +134,7 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: csrr a0, vlenb
; SUBREGLIVENESS-NEXT: slli a0, a0, 4
; SUBREGLIVENESS-NEXT: add sp, sp, a0
+; SUBREGLIVENESS-NEXT: .cfi_def_cfa sp, 32
; SUBREGLIVENESS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; SUBREGLIVENESS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; SUBREGLIVENESS-NEXT: addi sp, sp, 32
diff --git a/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll b/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll
new file mode 100644
index 00000000000000..6d9c4913d5d916
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=OMIT-FP %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs -frame-pointer=all < %s \
+; RUN: | FileCheck -check-prefix=NO-OMIT-FP %s
+
+define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee_cfi(<vscale x 1 x i32> %va) {
+; OMIT-FP-LABEL: test_vector_callee_cfi:
+; OMIT-FP: # %bb.0: # %entry
+; OMIT-FP-NEXT: addi sp, sp, -16
+; OMIT-FP-NEXT: .cfi_def_cfa_offset 16
+; OMIT-FP-NEXT: csrr a0, vlenb
+; OMIT-FP-NEXT: slli a0, a0, 3
+; OMIT-FP-NEXT: sub sp, sp, a0
+; OMIT-FP-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; OMIT-FP-NEXT: csrr a0, vlenb
+; OMIT-FP-NEXT: li a1, 6
+; OMIT-FP-NEXT: mul a0, a0, a1
+; OMIT-FP-NEXT: add a0, sp, a0
+; OMIT-FP-NEXT: addi a0, a0, 16
+; OMIT-FP-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill
+; OMIT-FP-NEXT: csrr a0, vlenb
+; OMIT-FP-NEXT: slli a0, a0, 2
+; OMIT-FP-NEXT: add a0, sp, a0
+; OMIT-FP-NEXT: addi a0, a0, 16
+; OMIT-FP-NEXT: vs2r.v v2, (a0) # Unknown-size Folded Spill
+; OMIT-FP-NEXT: addi a0, sp, 16
+; OMIT-FP-NEXT: vs4r.v v4, (a0) # Unknown-size Folded Spill
+; OMIT-FP-NEXT: .cfi_escape 0x10, 0x61, 0x08, 0x11, 0x7e, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v1 @ cfa - 2 * vlenb
+; OMIT-FP-NEXT: .cfi_escape 0x10, 0x62, 0x08, 0x11, 0x7c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v2m2 @ cfa - 4 * vlenb
+; OMIT-FP-NEXT: .cfi_escape 0x10, 0x64, 0x08, 0x11, 0x78, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v4m4 @ cfa - 8 * vlenb
+; OMIT-FP-NEXT: #APP
+; OMIT-FP-NEXT: #NO_APP
+; OMIT-FP-NEXT: csrr a0, vlenb
+; OMIT-FP-NEXT: li a1, 6
+; OMIT-FP-NEXT: mul a0, a0, a1
+; OMIT-FP-NEXT: add a0, sp, a0
+; OMIT-FP-NEXT: addi a0, a0, 16
+; OMIT-FP-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
+; OMIT-FP-NEXT: csrr a0, vlenb
+; OMIT-FP-NEXT: slli a0, a0, 2
+; OMIT-FP-NEXT: add a0, sp, a0
+; OMIT-FP-NEXT: addi a0, a0, 16
+; OMIT-FP-NEXT: vl2r.v v2, (a0) # Unknown-size Folded Reload
+; OMIT-FP-NEXT: addi a0, sp, 16
+; OMIT-FP-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; OMIT-FP-NEXT: .cfi_restore v1
+; OMIT-FP-NEXT: .cfi_restore v2
+; OMIT-FP-NEXT: .cfi_restore v4
+; OMIT-FP-NEXT: csrr a0, vlenb
+; OMIT-FP-NEXT: slli a0, a0, 3
+; OMIT-FP-NEXT: add sp, sp, a0
+; OMIT-FP-NEXT: .cfi_def_cfa sp, 16
+; OMIT-FP-NEXT: addi sp, sp, 16
+; OMIT-FP-NEXT: ret
+;
+; NO-OMIT-FP-LABEL: test_vector_callee_cfi:
+; NO-OMIT-FP: # %bb.0: # %entry
+; NO-OMIT-FP-NEXT: addi sp, sp, -32
+; NO-OMIT-FP-NEXT: .cfi_def_cfa_offset 32
+; NO-OMIT-FP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; NO-OMIT-FP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; NO-OMIT-FP-NEXT: .cfi_offset ra, -8
+; NO-OMIT-FP-NEXT: .cfi_offset s0, -16
+; NO-OMIT-FP-NEXT: addi s0, sp, 32
+; NO-OMIT-FP-NEXT: .cfi_def_cfa s0, 0
+; NO-OMIT-FP-NEXT: csrr a0, vlenb
+; NO-OMIT-FP-NEXT: slli a0, a0, 3
+; NO-OMIT-FP-NEXT: sub sp, sp, a0
+; NO-OMIT-FP-NEXT: csrr a0, vlenb
+; NO-OMIT-FP-NEXT: slli a0, a0, 1
+; NO-OMIT-FP-NEXT: sub a0, s0, a0
+; NO-OMIT-FP-NEXT: addi a0, a0, -32
+; NO-OMIT-FP-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill
+; NO-OMIT-FP-NEXT: csrr a0, vlenb
+; NO-OMIT-FP-NEXT: slli a0, a0, 2
+; NO-OMIT-FP-NEXT: sub a0, s0, a0
+; NO-OMIT-FP-NEXT: addi a0, a0, -32
+; NO-OMIT-FP-NEXT: vs2r.v v2, (a0) # Unknown-size Folded Spill
+; NO-OMIT-FP-NEXT: csrr a0, vlenb
+; NO-OMIT-FP-NEXT: slli a0, a0, 3
+; NO-OMIT-FP-NEXT: sub a0, s0, a0
+; NO-OMIT-FP-NEXT: addi a0, a0, -32
+; NO-OMIT-FP-NEXT: vs4r.v v4, (a0) # Unknown-size Folded Spill
+; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x61, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x7e, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v1 @ cfa - 32 - 2 * vlenb
+; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x62, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x7c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v2m2 @ cfa - 32 - 4 * vlenb
+; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x64, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v4m4 @ cfa - 32 - 8 * vlenb
+; NO-OMIT-FP-NEXT: #APP
+; NO-OMIT-FP-NEXT: #NO_APP
+; NO-OMIT-FP-NEXT: csrr a0, vlenb
+; NO-OMIT-FP-NEXT: slli a0, a0, 1
+; NO-OMIT-FP-NEXT: sub a0, s0, a0
+; NO-OMIT-FP-NEXT: addi a0, a0, -32
+; NO-OMIT-FP-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
+; NO-OMIT-FP-NEXT: csrr a0, vlenb
+; NO-OMIT-FP-NEXT: slli a0, a0, 2
+; NO-OMIT-FP-NEXT: sub a0, s0, a0
+; NO-OMIT-FP-NEXT: addi a0, a0, -32
+; NO-OMIT-FP-NEXT: vl2r.v v2, (a0) # Unknown-size Folded Reload
+; NO-OMIT-FP-NEXT: csrr a0, vlenb
+; NO-OMIT-FP-NEXT: slli a0, a0, 3
+; NO-OMIT-FP-NEXT: sub a0, s0, a0
+; NO-OMIT-FP-NEXT: addi a0, a0, -32
+; NO-OMIT-FP-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; NO-OMIT-FP-NEXT: .cfi_restore v1
+; NO-OMIT-FP-NEXT: .cfi_restore v2
+; NO-OMIT-FP-NEXT: .cfi_restore v4
+; NO-OMIT-FP-NEXT: addi sp, s0, -32
+; NO-OMIT-FP-NEXT: .cfi_def_cfa sp, 32
+; NO-OMIT-FP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; NO-OMIT-FP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; NO-OMIT-FP-NEXT: addi sp, sp, 32
+; NO-OMIT-FP-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7}"()
+
+ ret <vscale x 1 x i32> %va
+}
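For readers decoding the escapes in this test, here is a byte-by-byte
breakdown of the first one, assuming the psABI DWARF numbering
(v1 = 97; the vlenb CSR maps to 4096 + 0xc22 = 7202):

  0x10                ; DW_CFA_expression
  0x61                ; ULEB128 register: 97 (v1)
  0x08                ; ULEB128 expression length: 8 bytes
  0x11 0x7e           ; DW_OP_consts -2 (SLEB128)
  0x92 0xa2 0x38 0x00 ; DW_OP_bregx 7202 (vlenb), offset 0
  0x1e                ; DW_OP_mul  -> -2 * vlenb
  0x22                ; DW_OP_plus -> CFA - 2 * vlenb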
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 8898ce509ecb7a..991b10fbe09a11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -650,6 +650,7 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
index e578aada5a9cfe..4e2e5e87232e1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -46,6 +46,7 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 528
; RV64IV-NEXT: addi sp, sp, 528
; RV64IV-NEXT: ret
%local = alloca i64
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
index a54da97d2548a1..eee69f7b889ddd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -50,6 +50,7 @@ body: |
; CHECK-NEXT: VS1R_V killed renamable $v8, killed renamable $x10
; CHECK-NEXT: $x2 = frame-destroy ADDI $x8, -2048
; CHECK-NEXT: $x2 = frame-destroy ADDI killed $x2, -224
+ ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 2272
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 240
; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
index 1fe91c721f4dd2..ecfe5359fd5afc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
@@ -30,6 +30,7 @@ define void @test(ptr %addr) {
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index a9a680d54d5897..e82917469b182b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -30,6 +30,7 @@ define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
index 19ae26d2424260..e8751e50c7a6df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -1209,6 +1209,7 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 66eab2f6536224..36614c36c32488 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -2438,6 +2438,7 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -2515,6 +2516,7 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
@@ -2610,6 +2612,7 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -2798,6 +2801,7 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -2875,6 +2879,7 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
@@ -2970,6 +2975,7 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -3132,6 +3138,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
index d3fce3caf8aef9..8c45597f246447 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -553,6 +553,7 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 800dc7ec388591..e45968b65922d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -1128,6 +1128,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1178,6 +1179,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
@@ -1242,6 +1244,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1373,6 +1376,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1423,6 +1427,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
@@ -1487,6 +1492,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1586,6 +1592,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 187f758b780204..333e975a8da7c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -184,6 +184,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(ptr %x) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <vscale x 128 x i32>, ptr %x
@@ -272,6 +273,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_param_nxv32i32_nxv32i32_nxv32i32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%r = add <vscale x 32 x i32> %x, %y
@@ -315,6 +317,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV32-NEXT: vmv8r.v v16, v24
; RV32-NEXT: call ext2
; RV32-NEXT: addi sp, s0, -144
+; RV32-NEXT: .cfi_def_cfa sp, 144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
@@ -349,6 +352,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV64-NEXT: vmv8r.v v16, v24
; RV64-NEXT: call ext2
; RV64-NEXT: addi sp, s0, -144
+; RV64-NEXT: .cfi_def_cfa sp, 144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
@@ -421,6 +425,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: call ext3
; RV32-NEXT: addi sp, s0, -144
+; RV32-NEXT: .cfi_def_cfa sp, 144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
@@ -489,6 +494,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: call ext3
; RV64-NEXT: addi sp, s0, -144
+; RV64-NEXT: .cfi_def_cfa sp, 144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
@@ -564,6 +570,7 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: call vector_arg_indirect_stack
; RV32-NEXT: addi sp, s0, -144
+; RV32-NEXT: .cfi_def_cfa sp, 144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
@@ -615,6 +622,7 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: call vector_arg_indirect_stack
; RV64-NEXT: addi sp, s0, -144
+; RV64-NEXT: .cfi_def_cfa sp, 144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
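In the frame-pointer-based epilogues above, sp is instead restored in one step from s0 (`addi sp, s0, -144`), and the new directive records that the CFA is sp-relative again before ra and s0 are reloaded. The same pattern, annotated (offsets as in the RV64 checks above; a sketch, not a verbatim excerpt):

    addi sp, s0, -144        # sp = frame base; 144 = static frame size
    .cfi_def_cfa sp, 144     # new: CFA switches from s0-relative to sp + 144
    ld   ra, 136(sp)         # restore return address
    ld   s0, 128(sp)         # restore caller's frame pointer
    addi sp, sp, 144         # deallocate the frame
    ret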
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 78385a80b47ebd..8a2839d31aaa07 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -47,6 +47,7 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: call callee_scalable_vector_split_indirect
; RV32-NEXT: addi sp, s0, -144
+; RV32-NEXT: .cfi_def_cfa sp, 144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
@@ -78,6 +79,7 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: call callee_scalable_vector_split_indirect
; RV64-NEXT: addi sp, s0, -144
+; RV64-NEXT: .cfi_def_cfa sp, 144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index edc348ebc68ff3..21f8e635b04944 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -798,6 +798,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index e4f030a642f7d1..5984df39f1c554 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -2615,6 +2615,7 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
}
define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64_unmasked(<vscale x 16 x i64> %va, i32 zeroext %evl) {
+;
; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 2310f85b1fba93..6b77583f5d63d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2288,6 +2288,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: li a1, 56
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -2373,6 +2374,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
@@ -2505,6 +2507,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 145ce6e917f962..b5c1b154d502af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2530,6 +2530,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: li a1, 56
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -2622,6 +2623,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
@@ -2773,6 +2775,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -4151,6 +4154,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 600084632ce68a..c1a49fd51c8474 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -152,6 +152,7 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $x2 = frame-destroy ADDI $x8, -2048
; CHECK-NEXT: $x2 = frame-destroy ADDI killed $x2, -256
+ ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 2304
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 272
; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
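The MIR form of the same directive shows up in emergency-slot.mir: `$x2` is the ABI register sp, and 2304 is the static frame size that the two preceding frame-destroy ADDIs subtract from `$x8` (the frame pointer s0). Annotating the checked lines:

    $x2 = frame-destroy ADDI $x8, -2048               ; sp = s0 - 2048
    $x2 = frame-destroy ADDI killed $x2, -256         ; sp = s0 - 2304
    frame-destroy CFI_INSTRUCTION def_cfa $x2, 2304   ; CFA = sp + 2304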
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index f1ac9aa53f5795..c2c13b9b519bfc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -615,6 +615,7 @@ define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: fld fa0, -8(a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
@@ -652,6 +653,7 @@ define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: fld fa0, 0(a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
@@ -702,6 +704,7 @@ define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 zeroext %i
; RV32-NEXT: vs8r.v v16, (a1)
; RV32-NEXT: fld fa0, 0(a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
@@ -737,6 +740,7 @@ define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 zeroext %i
; RV64-NEXT: vs8r.v v16, (a1)
; RV64-NEXT: fld fa0, 0(a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index d9fdec3041cb06..337eb8f455e60d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -895,6 +895,7 @@ define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: lw a0, -4(a0)
; CHECK-NEXT: addi sp, s0, -80
+; CHECK-NEXT: .cfi_def_cfa sp, 80
; CHECK-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 80
@@ -945,6 +946,7 @@ define i32 @extractelt_nxv32i32_idx(<vscale x 32 x i32> %v, i32 %idx) {
; CHECK-NEXT: vs8r.v v16, (a1)
; CHECK-NEXT: lw a0, 0(a0)
; CHECK-NEXT: addi sp, s0, -80
+; CHECK-NEXT: .cfi_def_cfa sp, 80
; CHECK-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index fcee77ae8d8e95..1a23f5060662cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -889,6 +889,7 @@ define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: ld a0, 0(a0)
; CHECK-NEXT: addi sp, s0, -80
+; CHECK-NEXT: .cfi_def_cfa sp, 80
; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 80
@@ -939,6 +940,7 @@ define i64 @extractelt_nxv16i64_idx(<vscale x 16 x i64> %v, i32 zeroext %idx) {
; CHECK-NEXT: vs8r.v v16, (a1)
; CHECK-NEXT: ld a0, 0(a0)
; CHECK-NEXT: addi sp, s0, -80
+; CHECK-NEXT: .cfi_def_cfa sp, 80
; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 91bf3e981e0a6c..f44852b0a10462 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -1797,6 +1797,7 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -1874,6 +1875,7 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
@@ -1972,6 +1974,7 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -2163,6 +2166,7 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -2240,6 +2244,7 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
@@ -2338,6 +2343,7 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -2491,6 +2497,7 @@ define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 ze
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
index 6308f73e219da1..d773449f4149c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
@@ -856,6 +856,7 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -906,6 +907,7 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <15 x i64> @llvm.vp.bswap.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
@@ -964,6 +966,7 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1089,6 +1092,7 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1139,6 +1143,7 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
@@ -1197,6 +1202,7 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1287,6 +1293,7 @@ define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <128 x i16> @llvm.vp.bswap.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 194179f9f470e1..dd6df9d4062c15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -827,6 +827,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
index 37e6c35196c6a4..39c24eaecc4330 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
@@ -2275,6 +2275,7 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: li a1, 56
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -2386,6 +2387,7 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
@@ -2537,6 +2539,7 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -4864,6 +4867,7 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: li a1, 56
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -4975,6 +4979,7 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl)
@@ -5126,6 +5131,7 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index 08f7e2058ad29e..1f36279d2eacf5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1684,6 +1684,7 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -1768,6 +1769,7 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
@@ -1903,6 +1905,7 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
index 082ac1871e9409..98faa4875b71e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
@@ -1935,6 +1935,7 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: li a1, 56
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -2026,6 +2027,7 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
@@ -2157,6 +2159,7 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -4124,6 +4127,7 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: li a1, 56
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
@@ -4215,6 +4219,7 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl)
@@ -4346,6 +4351,7 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 583742224f8cf1..decbe944d5d751 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -827,6 +827,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index 4a5ef21efdb968..4c159fe6241f53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -598,6 +598,7 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.maximum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -723,6 +724,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.maximum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
@@ -806,6 +808,7 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index 8ea08bdedd6d34..9de1fcaa3821bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -598,6 +598,7 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -723,6 +724,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
@@ -806,6 +808,7 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index 6bfd0ac932672f..93159f9640b472 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -275,6 +275,7 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-NEXT: csrr a0, vlenb
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: add sp, sp, a0
+; V128-NEXT: .cfi_def_cfa sp, 16
; V128-NEXT: addi sp, sp, 16
; V128-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
index a2ff77625b7584..89340494256ac8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
@@ -692,8 +692,71 @@ define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
+; RV32-LABEL: fshr_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v24, (a0)
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsll.vi v16, v8, 1, v0.t
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v8, -1
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vxor.vv v8, v24, v8, v0.t
+; RV32-NEXT: li a0, 63
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsll.vv v8, v16, v8, v0.t
+; RV32-NEXT: vand.vx v16, v24, a0, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vv v16, v24, v16, v0.t
+; RV32-NEXT: vor.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+; RV64-LABEL: fshr_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v24, (a0)
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsll.vi v16, v8, 1, v0.t
+; RV64-NEXT: li a0, 63
+; RV64-NEXT: vnot.v v8, v24, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsll.vv v8, v16, v8, v0.t
+; RV64-NEXT: vand.vx v16, v24, a0, v0.t
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsrl.vv v16, v24, v16, v0.t
+; RV64-NEXT: vor.vv v8, v8, v16, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
%res = call <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
ret <16 x i64> %res
}
@@ -727,8 +790,71 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
+; RV32-LABEL: fshl_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v24, (a0)
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v16, 1, v0.t
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v8, -1
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vxor.vv v8, v24, v8, v0.t
+; RV32-NEXT: li a0, 63
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vv v8, v16, v8, v0.t
+; RV32-NEXT: vand.vx v16, v24, a0, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsll.vv v16, v24, v16, v0.t
+; RV32-NEXT: vor.vv v8, v16, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+; RV64-LABEL: fshl_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v24, (a0)
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsrl.vi v16, v16, 1, v0.t
+; RV64-NEXT: li a0, 63
+; RV64-NEXT: vnot.v v8, v24, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vv v8, v16, v8, v0.t
+; RV64-NEXT: vand.vx v16, v24, a0, v0.t
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsll.vv v16, v24, v16, v0.t
+; RV64-NEXT: vor.vv v8, v16, v8, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
%res = call <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
ret <16 x i64> %res
}
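While sp still includes the vlenb-scaled allocation, the CFA cannot be a plain register + offset, which is why the prologues above emit a DWARF expression through `.cfi_escape`. A byte-by-byte reading of that escape (the operator encodings are standard DWARF; 7202 should be the RISC-V DWARF number for the vlenb CSR, 4096 + 0xC22):

    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22
    # 0x0f                 DW_CFA_def_cfa_expression
    # 0x0d                 expression length: 13 bytes
    # 0x72 0x00            DW_OP_breg2 0        push sp (DWARF reg 2)
    # 0x11 0x10            DW_OP_consts 16      push 16
    # 0x22                 DW_OP_plus           sp + 16
    # 0x11 0x08            DW_OP_consts 8       push 8
    # 0x92 0xa2 0x38 0x00  DW_OP_bregx 7202, 0  push vlenb
    # 0x1e                 DW_OP_mul            8 * vlenb
    # 0x22                 DW_OP_plus           CFA = sp + 16 + 8 * vlenb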
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index efb1f720f2d096..40b3f14acb6a84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -484,6 +484,7 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
; RV32-NEXT: vs8r.v v8, (a0)
; RV32-NEXT: vs8r.v v16, (a1)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
@@ -517,6 +518,7 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: vs8r.v v16, (a1)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index 6da83644413bc2..027c36597bccd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -440,6 +440,7 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
; V128-NEXT: csrr a0, vlenb
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: add sp, sp, a0
+; V128-NEXT: .cfi_def_cfa sp, 16
; V128-NEXT: addi sp, sp, 16
; V128-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index e27ff0a573d5f2..01c1d8f99fe81c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -578,6 +578,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: li a1, 56
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -955,6 +956,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: li a1, 52
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%interleaved.vec = load <48 x i64>, ptr %ptr
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index d55683e653d246..3e84e9903994ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -73,6 +73,7 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 32
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
@@ -167,6 +168,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 32
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
@@ -273,6 +275,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 32
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
@@ -388,6 +391,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: addi sp, s0, -208
+; RV32-NEXT: .cfi_def_cfa sp, 208
; RV32-NEXT: lw ra, 204(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 200(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 208
@@ -570,6 +574,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vle32.v v8, (a1)
; RV32-NEXT: addi sp, s0, -400
+; RV32-NEXT: .cfi_def_cfa sp, 400
; RV32-NEXT: lw ra, 396(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 392(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 400
@@ -727,6 +732,7 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 32
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
@@ -821,6 +827,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 32
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
@@ -925,6 +932,7 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: addi sp, s0, -272
+; RV32-NEXT: .cfi_def_cfa sp, 272
; RV32-NEXT: lw ra, 268(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 264(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 272
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 5407eadb160bde..cb4d5cc8bc1ab4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -650,6 +650,7 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index d12bd651a10c0a..c5d5478e016736 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -2058,6 +2058,7 @@ define float @vreduce_fminimum_v64f32(ptr %x) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <64 x float>, ptr %x
@@ -2262,6 +2263,7 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <128 x float>, ptr %x
@@ -2674,6 +2676,7 @@ define double @vreduce_fminimum_v32f64(ptr %x) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <32 x double>, ptr %x
@@ -2858,6 +2861,7 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <64 x double>, ptr %x
@@ -3386,6 +3390,7 @@ define float @vreduce_fmaximum_v64f32(ptr %x) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <64 x float>, ptr %x
@@ -3590,6 +3595,7 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <128 x float>, ptr %x
@@ -4002,6 +4008,7 @@ define double @vreduce_fmaximum_v32f64(ptr %x) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <32 x double>, ptr %x
@@ -4186,6 +4193,7 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = load <64 x double>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 6c75c9b9c29498..ed9bb07f6ce323 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -1566,6 +1566,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 4
; RV32-NEXT: add sp, sp, a2
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1611,6 +1612,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: add sp, sp, a1
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = load <64 x i32>, ptr %x
@@ -1665,6 +1667,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 4
; RV32-NEXT: add sp, sp, a2
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1710,6 +1713,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: add sp, sp, a1
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = load <64 x i32>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 3e0fb3009c6b19..ddc0d2ed4744fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -603,6 +603,7 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 96a72d0ddd18b8..b7cc44625addb7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -827,6 +827,7 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 74a43f09542af4..e6bf354f07e6de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -827,6 +827,7 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 91de65c79bb705..c1a2b33bcbfe83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -827,6 +827,7 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
index 2ff2529e259a8d..f52a89dc71bc15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
@@ -1191,6 +1191,7 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH32-NEXT: csrr a0, vlenb
; ZVFH32-NEXT: slli a0, a0, 4
; ZVFH32-NEXT: add sp, sp, a0
+; ZVFH32-NEXT: .cfi_def_cfa sp, 16
; ZVFH32-NEXT: addi sp, sp, 16
; ZVFH32-NEXT: ret
;
@@ -1243,6 +1244,7 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH64-NEXT: csrr a0, vlenb
; ZVFH64-NEXT: slli a0, a0, 4
; ZVFH64-NEXT: add sp, sp, a0
+; ZVFH64-NEXT: .cfi_def_cfa sp, 16
; ZVFH64-NEXT: addi sp, sp, 16
; ZVFH64-NEXT: ret
;
@@ -2951,6 +2953,7 @@ define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x i1> @llvm.vp.fcmp.v32f64(<32 x double> %va, <32 x double> %vb, metadata !"oeq", <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index e558d45a3b2d73..42ce9495c41c31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -673,6 +673,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <256 x i1> @llvm.vp.icmp.v256i8(<256 x i8> %va, <256 x i8> %vb, metadata !"eq", <256 x i1> %m, i32 %evl)
@@ -1364,6 +1365,7 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
@@ -1416,6 +1418,7 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%v = call <64 x i1> @llvm.vp.icmp.v64i32(<64 x i32> %va, <64 x i32> %vb, metadata !"eq", <64 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 34b0789d801a3c..4d826160c220ec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -494,6 +494,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 6
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64> %a, <128 x i1> %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
index f9b67b83f87239..2c8dedc570002b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -356,6 +356,7 @@ define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
index 65776339de07a5..0a07018ed408c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -963,6 +963,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)
@@ -1030,6 +1031,7 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
index ca033c26dba1f7..25da98a26b6727 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -448,6 +448,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.maxnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
index 1f1efdaf1ee5ac..abd99cae8466f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -448,6 +448,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
index 28ab179048ca1b..6e1126b64eb34d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
@@ -727,6 +727,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)
@@ -794,6 +795,7 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
index c9dc75e18774f8..3c8e824381e581 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -123,6 +123,7 @@ define <64 x float> @vfwadd_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <64 x half>, ptr %x
@@ -234,6 +235,7 @@ define <32 x double> @vfwadd_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <32 x float>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
index 8ad858d4c76598..8218c8768d213d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -123,6 +123,7 @@ define <64 x float> @vfwmul_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <64 x half>, ptr %x
@@ -234,6 +235,7 @@ define <32 x double> @vfwmul_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <32 x float>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
index d22781d6a97ac2..1a16b6a808b6d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -123,6 +123,7 @@ define <64 x float> @vfwsub_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <64 x half>, ptr %x
@@ -234,6 +235,7 @@ define <32 x double> @vfwsub_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <32 x float>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index 6fe83fed6fd9c2..c6129f9bd07954 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1245,6 +1245,7 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.merge.v32f64(<32 x i1> %m, <32 x double> %va, <32 x double> %vb, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index 83e3422c44b95d..5f3401c728841f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -1744,6 +1744,7 @@ define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
call void @llvm.vp.scatter.v32f64.v32p0(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m, i32 %evl)
@@ -1828,6 +1829,7 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV64-NEXT: li a1, 10
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <32 x i32> %idxs
@@ -1914,6 +1916,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: li a1, 10
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%eidxs = sext <32 x i32> %idxs to <32 x i64>
@@ -2001,6 +2004,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: li a1, 10
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%eidxs = zext <32 x i32> %idxs to <32 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
index 12d96fbfb88d63..ace538029d9d97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
@@ -107,6 +107,7 @@ define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
; CHECK-NEXT: li a1, 48
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%c = add <512 x i8> %a, %b
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index d05f580ea7d222..6a49adef97d70c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -201,6 +201,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 %evl)
@@ -259,6 +260,7 @@ define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 129)
@@ -447,6 +449,7 @@ define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 %evl)
@@ -604,6 +607,7 @@ define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <64 x float> @llvm.vp.select.v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index 2abd34f01c14c0..56d99ed4f3a231 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -307,6 +307,7 @@ define <128 x i16> @vwmul_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <128 x i8>, ptr %x
@@ -355,6 +356,7 @@ define <64 x i32> @vwmul_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <64 x i16>, ptr %x
@@ -402,6 +404,7 @@ define <32 x i64> @vwmul_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <32 x i32>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index 921037db2ea99e..b971de8cf6237d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -299,6 +299,7 @@ define <128 x i16> @vwmulsu_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <128 x i8>, ptr %x
@@ -347,6 +348,7 @@ define <64 x i32> @vwmulsu_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <64 x i16>, ptr %x
@@ -394,6 +396,7 @@ define <32 x i64> @vwmulsu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <32 x i32>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index bff7ef86c28960..1574d04eb1aa78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -283,6 +283,7 @@ define <128 x i16> @vwmulu_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <128 x i8>, ptr %x
@@ -331,6 +332,7 @@ define <64 x i32> @vwmulu_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <64 x i16>, ptr %x
@@ -378,6 +380,7 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%a = load <32 x i32>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index d1cd82d9f7c18f..aad6435bdf3a78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -798,6 +798,7 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index c954c9a6d0d113..0ea6b7807aef86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -188,6 +188,7 @@ define <vscale x 16 x half> @vfmax_nxv16f16_vv(<vscale x 16 x half> %a, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x half> @llvm.maximum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index f4350a1e1ced31..be9e8d9c53e73e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -360,6 +360,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -406,6 +407,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
@@ -442,6 +444,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
@@ -600,6 +603,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: li a1, 34
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.maximum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -713,6 +717,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -1009,6 +1014,7 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.maximum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -1165,6 +1171,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: li a1, 42
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.maximum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -1236,6 +1243,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index 567068fdfb1c47..52347a892fb75a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -188,6 +188,7 @@ define <vscale x 16 x half> @vfmin_nxv16f16_vv(<vscale x 16 x half> %a, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x half> @llvm.minimum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 1f8af732e3f995..011ad02662bef4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -360,6 +360,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -406,6 +407,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
@@ -442,6 +444,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
@@ -600,6 +603,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: li a1, 34
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -713,6 +717,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -1009,6 +1014,7 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -1165,6 +1171,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: li a1, 42
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -1236,6 +1243,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index 208c881294eed6..f9011d0c4d57f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -497,6 +497,7 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -665,6 +666,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -847,6 +849,7 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1489,6 +1492,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -1767,6 +1771,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -2071,6 +2076,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -2258,6 +2264,7 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2346,6 +2353,7 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2485,6 +2493,7 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2665,6 +2674,7 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2753,6 +2763,7 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2892,6 +2903,7 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -3784,6 +3796,7 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -3950,6 +3963,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -4131,6 +4145,7 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -4761,6 +4776,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -5035,6 +5051,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -5338,6 +5355,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -5529,6 +5547,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -5615,6 +5634,7 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -5731,6 +5751,7 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -5915,6 +5936,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -6001,6 +6023,7 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -6117,6 +6140,7 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index f9f085dcc16143..c4d74df5c9b494 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -231,6 +231,7 @@ define <vscale x 64 x i8> @fshr_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 64 x i8> @llvm.vp.fshr.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
@@ -262,6 +263,7 @@ define <vscale x 64 x i8> @fshl_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 64 x i8> @llvm.vp.fshl.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
@@ -463,6 +465,7 @@ define <vscale x 32 x i16> @fshr_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 32 x i16> @llvm.vp.fshr.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 %evl)
@@ -494,6 +497,7 @@ define <vscale x 32 x i16> @fshl_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 32 x i16> @llvm.vp.fshl.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 %evl)
@@ -671,6 +675,7 @@ define <vscale x 16 x i32> @fshr_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 16 x i32> @llvm.vp.fshr.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
@@ -705,6 +710,7 @@ define <vscale x 16 x i32> @fshl_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 16 x i32> @llvm.vp.fshl.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
@@ -846,6 +852,7 @@ define <vscale x 7 x i64> @fshr_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 7 x i64> @llvm.vp.fshr.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
@@ -880,6 +887,7 @@ define <vscale x 7 x i64> @fshl_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 7 x i64> @llvm.vp.fshl.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
@@ -913,6 +921,7 @@ define <vscale x 8 x i64> @fshr_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i64> @llvm.vp.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
@@ -947,6 +956,7 @@ define <vscale x 8 x i64> @fshl_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i64> @llvm.vp.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
@@ -1134,6 +1144,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: li a1, 56
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 16 x i64> @llvm.vp.fshr.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
@@ -1317,6 +1328,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: li a1, 56
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 16 x i64> @llvm.vp.fshl.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
index 3b41e92f437309..8f6276fcf97b75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
@@ -36,6 +36,7 @@ body: |
; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 1
; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+ ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 0
; CHECK-NEXT: PseudoRET
bb.0:
bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
index b4d8805b65bd8f..a315fd4d151d08 100644
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -35,6 +35,7 @@
; CHECK-NEXT: call spillslot
; CHECK-NEXT: addi sp, s0, -2048
; CHECK-NEXT: addi sp, sp, -256
+ ; CHECK-NEXT: .cfi_def_cfa sp, 2304
; CHECK-NEXT: addi sp, sp, 272
; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
index 90bf29d7760112..ed6352e2edafe9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -21,6 +21,7 @@ define void @local_var_mf8() {
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
; RV64IV-NEXT: addi sp, sp, 16
; RV64IV-NEXT: ret
%local0 = alloca <vscale x 1 x i8>
@@ -48,6 +49,7 @@ define void @local_var_m1() {
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
; RV64IV-NEXT: addi sp, sp, 16
; RV64IV-NEXT: ret
%local0 = alloca <vscale x 8 x i8>
@@ -76,6 +78,7 @@ define void @local_var_m2() {
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 2
; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
; RV64IV-NEXT: addi sp, sp, 16
; RV64IV-NEXT: ret
%local0 = alloca <vscale x 16 x i8>
@@ -108,6 +111,7 @@ define void @local_var_m4() {
; RV64IV-NEXT: addi a0, sp, 32
; RV64IV-NEXT: vl4r.v v8, (a0)
; RV64IV-NEXT: addi sp, s0, -48
+; RV64IV-NEXT: .cfi_def_cfa sp, 48
; RV64IV-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64IV-NEXT: addi sp, sp, 48
@@ -142,6 +146,7 @@ define void @local_var_m8() {
; RV64IV-NEXT: addi a0, sp, 64
; RV64IV-NEXT: vl8r.v v8, (a0)
; RV64IV-NEXT: addi sp, s0, -80
+; RV64IV-NEXT: .cfi_def_cfa sp, 80
; RV64IV-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64IV-NEXT: addi sp, sp, 80
@@ -174,6 +179,7 @@ define void @local_var_m2_mix_local_scalar() {
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 2
; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
; RV64IV-NEXT: addi sp, sp, 16
; RV64IV-NEXT: ret
%local_scalar0 = alloca i32
@@ -223,6 +229,7 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
; RV64IV-NEXT: addi a0, a0, -32
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: addi sp, s0, -32
+; RV64IV-NEXT: .cfi_def_cfa sp, 32
; RV64IV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -277,6 +284,7 @@ define void @local_var_m2_with_bp(i64 %n) {
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: lw zero, 120(s1)
; RV64IV-NEXT: addi sp, s0, -256
+; RV64IV-NEXT: .cfi_def_cfa sp, 256
; RV64IV-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s1, 232(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
index 87dd0048d928a9..cea8131cffb950 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -60,6 +60,7 @@ define <vscale x 64 x i8> @caller() {
; RV64IV-NEXT: vs8r.v v24, (a1)
; RV64IV-NEXT: call callee
; RV64IV-NEXT: addi sp, s0, -80
+; RV64IV-NEXT: .cfi_def_cfa sp, 80
; RV64IV-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64IV-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 07dcddd9c68601..087dbc94af19dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1252,6 +1252,7 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 139e1ea262b713..60af2c75c7c821 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -1742,6 +1742,7 @@ define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index 4e08f401ca4e90..ffa7d4eb9d46b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -1699,6 +1699,7 @@ define <vscale x 12 x i64> @reverse_nxv12i64(<vscale x 12 x i64> %a) {
; RV32-NEXT: vl8re64.v v16, (a0)
; RV32-NEXT: vl8re64.v v8, (a1)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
@@ -1735,6 +1736,7 @@ define <vscale x 12 x i64> @reverse_nxv12i64(<vscale x 12 x i64> %a) {
; RV64-NEXT: vl8re64.v v16, (a0)
; RV64-NEXT: vl8re64.v v8, (a1)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 7c354c3714c6f4..a75c5ad509224b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -524,6 +524,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -607,6 +608,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -1150,6 +1152,7 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
index 47b88ba71d5560..e45cd594fa84d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
@@ -42,6 +42,7 @@ define signext i32 @foo(i32 signext %aa) #0 {
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: li a0, 0
; CHECK-NEXT: addi sp, s0, -96
+; CHECK-NEXT: .cfi_def_cfa sp, 96
; CHECK-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
index 600ac594380b39..e21f266347dbd5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
@@ -47,6 +47,7 @@ define void @foo(ptr nocapture noundef %p1) {
; CHECK-NEXT: vfadd.vv v8, v10, v8
; CHECK-NEXT: vse32.v v8, (s2)
; CHECK-NEXT: addi sp, s0, -192
+; CHECK-NEXT: .cfi_def_cfa sp, 192
; CHECK-NEXT: ld ra, 184(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 176(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 168(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index 04761d4e7bfc4a..35568fd508a3c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -479,6 +479,7 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -556,6 +557,7 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -1044,6 +1046,7 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index 16bd665dd0de98..c22743ccd628fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -525,6 +525,7 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -608,6 +609,7 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -1140,6 +1142,7 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 429ddb6c71be34..657c8d3be1223e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -525,6 +525,7 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -608,6 +609,7 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -1140,6 +1142,7 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index c854e0fb8a05d9..8298a378f62342 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -525,6 +525,7 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -608,6 +609,7 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -1140,6 +1142,7 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
index c164b62a679be0..47fbe4810f2ab7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -51,6 +51,7 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
; CHECK-NEXT: call bar
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: addi sp, s0, -96
+; CHECK-NEXT: .cfi_def_cfa sp, 96
; CHECK-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 243dc19a25588d..fa24597c118aa7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -2241,6 +2241,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 4
; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
@@ -2405,6 +2406,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: li a1, 34
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 64 x i1> @llvm.vp.fcmp.nxv64f16(<vscale x 64 x half> %va, <vscale x 64 x half> %vb, metadata !"oeq", <vscale x 64 x i1> %m, i32 %evl)
@@ -3644,6 +3646,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: li a1, 48
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i1> @llvm.vp.fcmp.nxv32f64(<vscale x 32 x double> %va, <vscale x 32 x double> %vb, metadata !"oeq", <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 007afe12b8e438..4360466d104931 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1204,6 +1204,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 128 x i1> @llvm.vp.icmp.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, metadata !"eq", <vscale x 128 x i1> %m, i32 %evl)
@@ -2430,6 +2431,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i1> @llvm.vp.icmp.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, metadata !"eq", <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index cf6ce89b9b5a46..ffcfb7d19e7b67 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -607,6 +607,7 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
call void @llvm.experimental.vp.strided.store.nxv17f64.p0.i32(<vscale x 17 x double> %v, ptr %ptr, i32 %stride, <vscale x 17 x i1> %mask, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
index f3574200054fd4..941a59a12ca725 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -290,6 +290,7 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -346,6 +347,7 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index a320aecc6fce49..9f222a04deb255 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -168,6 +168,7 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i6
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%vec = load <vscale x 16 x i64>, ptr %p
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index ef4baf34d23f03..41d36ba75a1b09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -226,6 +226,7 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%retval = call {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.experimental.vector.deinterleave2.nxv16i64(<vscale x 16 x i64> %vec)
@@ -427,6 +428,7 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%retval = call {<vscale x 8 x double>, <vscale x 8 x double>} @llvm.experimental.vector.deinterleave2.nxv16f64(<vscale x 16 x double> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index c23c10205e6e36..bd8d7bbf4b47bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -140,6 +140,7 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index e84fd1b1a7036a..87d424825f79d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -315,6 +315,7 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
@@ -350,6 +351,7 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 3
; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
%res = call <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
@@ -615,6 +617,7 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
@@ -650,6 +653,7 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 3
; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
%res = call <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 00cb54c61a7a94..b90acb6cbf3e0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -626,6 +626,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
@@ -682,6 +683,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index a49c0fd08ffe46..1ba819f318e424 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -570,6 +570,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
@@ -626,6 +627,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index c18602c98e6b87..51b0aec6eda0e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -1292,6 +1292,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: li a1, 40
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.fma.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> %m, i32 %evl)
@@ -1357,6 +1358,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index d4ba0f8c907338..7266a6fd7a304a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -251,6 +251,7 @@ define <vscale x 16 x half> @vfmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%vd = call <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -292,6 +293,7 @@ define <vscale x 16 x half> @vfmadd_vf_nxv16f16(<vscale x 16 x half> %va, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 16 x half> poison, half %c, i32 0
@@ -397,6 +399,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: li a1, 40
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x half> %vb, <vscale x 32 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -487,6 +490,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %c, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 1f716a9abcc595..9b33f0aa527cfe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -278,6 +278,7 @@ define <vscale x 16 x half> @vfmadd_vf_nxv16f16(<vscale x 16 x half> %va, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 16 x half> poison, half %c, i32 0
@@ -383,6 +384,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: li a1, 40
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%vd = call <vscale x 32 x half> @llvm.fma.v32f16(<vscale x 32 x half> %vc, <vscale x 32 x half> %vb, <vscale x 32 x half> %va)
@@ -441,6 +443,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %c, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index c3aaf743af170b..c76e7e1b70de9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -290,6 +290,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -346,6 +347,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index f18fa85e68d1be..fbe79e92bb01ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -290,6 +290,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -346,6 +347,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
index 5114f0a8d1d65d..19746657a1e0ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
@@ -418,6 +418,7 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: li a1, 28
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%neg = fneg <vscale x 32 x half> %va
@@ -487,6 +488,7 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %c, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 46b14153447cf7..8733919f507695 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -570,6 +570,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
@@ -626,6 +627,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 3bbedc109bd087..98ef9822680eb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1292,6 +1292,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: li a1, 40
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.fmuladd.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> %m, i32 %evl)
@@ -1357,6 +1358,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index db34980f525264..e0e65208d7bd88 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -495,6 +495,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%neg = fneg <vscale x 32 x half> %vc
@@ -578,6 +579,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: li a1, 12
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %c, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index ccbed4b9590500..1bdd9958ac19bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -305,6 +305,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%neg = fneg <vscale x 16 x half> %vc
@@ -352,6 +353,7 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vsca
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 16 x half> poison, half %c, i32 0
@@ -457,6 +459,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: li a1, 28
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%neg = fneg <vscale x 32 x half> %vc
@@ -544,6 +547,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %c, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index c6554561be3395..2139615c338289 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -128,6 +128,7 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 %vl)
@@ -213,6 +214,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index 0df7b2ce1978da..3e6dfb9e246f2d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -570,6 +570,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
@@ -626,6 +627,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
index d84df3a06473cb..e905a87a16eb0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
@@ -705,6 +705,7 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vscal
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
index 3dc8340600fded..366f51ba0f0cec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
@@ -649,6 +649,7 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
index 6eb1f512f76af7..61d2c2c014bda9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
@@ -625,6 +625,7 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
index 47df1b005a0f85..e80a8a43b8ebba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
@@ -557,6 +557,7 @@ define <vscale x 128 x i8> @test_vp_reverse_nxv128i8(<vscale x 128 x i8> %src, i
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a3)
; CHECK-NEXT: addi sp, s0, -80
+; CHECK-NEXT: .cfi_def_cfa sp, 80
; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index 2f8454983d0d6e..acfdcd1444fba1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -408,6 +408,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.merge.nxv128i8(<vscale x 128 x i1> %m, <vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index a907e149b167fb..952ead0c37eeda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -2182,6 +2182,7 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
call void @llvm.vp.scatter.nxv16f64.nxv16p0(<vscale x 16 x double> %val, <vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 %evl)
@@ -2262,6 +2263,7 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i16> %idxs
@@ -2343,6 +2345,7 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%eidxs = sext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index 8b27a61e243db1..5dfd4de320f2ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -456,6 +456,7 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
call void @llvm.vp.store.nxv17f64.p0(<vscale x 17 x double> %val, ptr %ptr, <vscale x 17 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 3faceb0aa6b614..977e92943215cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -488,6 +488,7 @@ define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %v
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%cond = icmp eq <vscale x 16 x i64> %va, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 1b568bf8801b10..6c360f27312b20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -397,6 +397,7 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.select.nxv32i32(<vscale x 32 x i1> %a, <vscale x 32 x i32> %b, <vscale x 32 x i32> %c, i32 %evl)
@@ -454,6 +455,7 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%evl = call i32 @llvm.vscale.i32()
@@ -740,6 +742,7 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.select.nxv16f64(<vscale x 16 x i1> %a, <vscale x 16 x double> %b, <vscale x 16 x double> %c, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index a624a42b3873bb..19239cacd10414 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -356,6 +356,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index d0b76e7e4535b8..6d8f545d27e00a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -63,6 +63,7 @@ body: |
; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3
; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+ ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 16
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
; CHECK-NEXT: PseudoRET
%0:gpr = COPY $x10
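
The repeated ``.cfi_def_cfa sp, 16`` additions above all share one cause: once the epilogue's ``add sp, sp, a0`` has undone the vlenb-scaled part of the frame, the CFA is again a fixed offset from sp, so the unwinder no longer needs the DWARF expression installed in the prologue. A rough sketch of emitting such a directive with stock LLVM APIs (the helper name and exact insertion point are illustrative, not this patch's literal code):

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  #include "llvm/CodeGen/TargetInstrInfo.h"
  #include "llvm/MC/MCDwarf.h"

  // Re-describe the CFA as SP + Offset once the scalable part of the stack
  // adjustment has been reversed in the epilogue (Offset is 16 in the
  // tests above).
  static void emitDefCFA(llvm::MachineFunction &MF,
                         llvm::MachineBasicBlock &MBB,
                         llvm::MachineBasicBlock::iterator MI,
                         const llvm::DebugLoc &DL,
                         const llvm::TargetInstrInfo &TII,
                         unsigned SPDwarfReg, int64_t Offset) {
    unsigned CFIIndex = MF.addFrameInst(
        llvm::MCCFIInstruction::cfiDefCfa(nullptr, SPDwarfReg, Offset));
    llvm::BuildMI(MBB, MI, DL, TII.get(llvm::TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlag(llvm::MachineInstr::FrameDestroy);
  }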
>From 9041f460511cf567974bf639cd5c8761dfbd4b38 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Sun, 14 Jan 2024 22:25:40 -0800
Subject: [PATCH 03/10] fixup! [RISCV] RISCV vector calling convention (1/2)
---
clang/include/clang/Basic/AttrDocs.td | 2 +-
.../RISCV/riscv-vector-callingconv.cpp | 1 -
llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 38 +++----------------
3 files changed, 7 insertions(+), 34 deletions(-)
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index 8f3fd8927a10a9..8a0df89a3615d5 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -5440,7 +5440,7 @@ def RISCVVectorCCDocs : Documentation {
The ``riscv_vector_cc`` attribute can be applied to a function. It preserves 15
registers, namely v1-v7 and v24-v31, as callee-saved. Callers thus don't need
to save these registers before function calls, and callees only need to save
-them only if they use them.
+them if they use them.
}];
}
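
For context, here is the attribute in use, mirroring the C test exercised later in this series (``riscv-vector-callingconv-llvm-ir.c``); compile with ``-triple riscv64 -target-feature +v``:

  #include <riscv_vector.h>

  // v1-v7 and v24-v31 are callee-saved across the call to bar(), so the
  // compiler may keep `val` in one of them instead of spilling it.
  __attribute__((riscv_vector_cc)) vint32m1_t bar(vint32m1_t input);

  vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
    vint32m1_t val = __riscv_vle32_v_i32m1(base, vl); // live across the call
    vint32m1_t ret = bar(input);
    __riscv_vse32_v_i32m1(base, val, vl);
    return ret;
  }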
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
index 22761146093fde..a187eb887943fb 100644
--- a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
@@ -1,4 +1,3 @@
-// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 %s -triple riscv64 -target-feature +v -verify
__attribute__((riscv_vector_cc)) int var; // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'int'}}
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index a5e8b22227d516..dfe191ba45b0fe 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -640,7 +640,7 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// directives.
for (const auto &Entry : CSI) {
int FrameIdx = Entry.getFrameIdx();
- if (FrameIdx >=0 &&
+ if (FrameIdx >= 0 &&
MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
continue;
@@ -1651,37 +1651,11 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
DL = MI->getDebugLoc();
// Manually restore values not restored by libcall & Push/Pop.
- // Keep the same order as in the prologue. There is no need to reverse the
- // order in the epilogue. In addition, the return address will be restored
- // first in the epilogue. It increases the opportunity to avoid the
- // load-to-use data hazard between loading RA and return by RA.
- // loadRegFromStackSlot can insert multiple instructions.
- //
- //
- // We first change the restore order for scalar and vector
- // callee-saved registers as the layout shown below:
- //
- // Epilog restore order (original):
- // ----------------------------
- // RVV objects
- // ----------------------------
- // Callee-saved regs(scalar)
- // Callee-saved regs(vector)
- // ----------------------------
- //
- // Epilog restore order (after):
- // ----------------------------
- // RVV objects
- // ----------------------------
- // Callee-saved regs(vector)
- // Callee-saved regs(scalar)
- // ----------------------------
- //
- // So that it is able to put all vector registers which need
- // to be restored together. The return address will be restored
- // first in the scalar regs. It increases the opportunity to avoid the
- // load-to-use data hazard between loading RA and return by RA.
- // loadRegFromStackSlot can insert multiple instructions.
+  // Reverse the restore order in the epilogue. In addition, the return
+  // address will be restored first in the epilogue. This increases
+  // the opportunity to avoid the load-to-use data hazard between
+  // loading RA and returning via RA. loadRegFromStackSlot can insert
+ // multiple instructions.
const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
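
In effect the shortened comment documents the sequence that follows it (the full code appears in a later hunk of this series). An excerpt-shaped sketch of the restore order, using the names from the surrounding code:

  // Vector callee-saved registers come back first (they live in the RVV
  // area of the frame), then the scalar ones, so RA is reloaded with as
  // much distance from the final `ret` as possible.
  loadRegFromStackSlot(RVVCSI);       // vector CSRs
  loadRegFromStackSlot(UnmanagedCSI); // scalar CSRs, including RA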
>From a3ac9a58d927a2675730e74808fd258e6b2c3435 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Mon, 22 Jan 2024 22:04:32 -0800
Subject: [PATCH 04/10] fixup! [RISCV] RISCV vector calling convention (1/2)
---
.../RISCV/riscv-vector-callingconv-llvm-ir.c | 12 +-
llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 2 +-
.../RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll | 124 ------------------
3 files changed, 5 insertions(+), 133 deletions(-)
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
index a5b59306223dfb..27f9ee9d1e271e 100644
--- a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
@@ -7,10 +7,8 @@
// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @bar
vint32m1_t __attribute__((riscv_vector_cc)) bar(vint32m1_t input);
vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
- vint32m1_t ret;
- vint32m1_t val;
- val = __riscv_vle32_v_i32m1(base, vl);
- ret = bar(input);
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
__riscv_vse32_v_i32m1(base, val, vl);
return ret;
}
@@ -18,10 +16,8 @@ vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
// CHECK-LLVM: call <vscale x 2 x i32> @baz
vint32m1_t baz(vint32m1_t input);
vint32m1_t test_no_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
- vint32m1_t ret;
- vint32m1_t val;
- val = __riscv_vle32_v_i32m1(base, vl);
- ret = baz(input);
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = baz(input);
__riscv_vse32_v_i32m1(base, val, vl);
return ret;
}
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index dfe191ba45b0fe..946ab37eb36062 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1110,7 +1110,7 @@ RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
// First push RVV Callee Saved object, then push RVV stack object
std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
- if (RVVCSI.size())
+ if (!RVVCSI.empty())
pushRVVObjects(RVVCSI[0].getFrameIdx(),
RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
index 89340494256ac8..92f4731b19eb2d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
@@ -695,68 +695,6 @@ define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-; RV32-LABEL: fshr_v16i64:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vle64.v v24, (a0)
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsll.vi v16, v8, 1, v0.t
-; RV32-NEXT: li a0, 32
-; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.i v8, -1
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vxor.vv v8, v24, v8, v0.t
-; RV32-NEXT: li a0, 63
-; RV32-NEXT: vand.vx v8, v8, a0, v0.t
-; RV32-NEXT: vsll.vv v8, v16, v8, v0.t
-; RV32-NEXT: vand.vx v16, v24, a0, v0.t
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vv v16, v24, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: ret
-; RV64-LABEL: fshr_v16i64:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 3
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vle64.v v24, (a0)
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsll.vi v16, v8, 1, v0.t
-; RV64-NEXT: li a0, 63
-; RV64-NEXT: vnot.v v8, v24, v0.t
-; RV64-NEXT: vand.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsll.vv v8, v16, v8, v0.t
-; RV64-NEXT: vand.vx v16, v24, a0, v0.t
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsrl.vv v16, v24, v16, v0.t
-; RV64-NEXT: vor.vv v8, v8, v16, v0.t
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: .cfi_def_cfa sp, 16
-; RV64-NEXT: addi sp, sp, 16
-; RV64-NEXT: ret
%res = call <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
ret <16 x i64> %res
}
@@ -793,68 +731,6 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-; RV32-LABEL: fshl_v16i64:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vle64.v v24, (a0)
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v16, 1, v0.t
-; RV32-NEXT: li a0, 32
-; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.i v8, -1
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vxor.vv v8, v24, v8, v0.t
-; RV32-NEXT: li a0, 63
-; RV32-NEXT: vand.vx v8, v8, a0, v0.t
-; RV32-NEXT: vsrl.vv v8, v16, v8, v0.t
-; RV32-NEXT: vand.vx v16, v24, a0, v0.t
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsll.vv v16, v24, v16, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: ret
-; RV64-LABEL: fshl_v16i64:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 3
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vle64.v v24, (a0)
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsrl.vi v16, v16, 1, v0.t
-; RV64-NEXT: li a0, 63
-; RV64-NEXT: vnot.v v8, v24, v0.t
-; RV64-NEXT: vand.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsrl.vv v8, v16, v8, v0.t
-; RV64-NEXT: vand.vx v16, v24, a0, v0.t
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsll.vv v16, v24, v16, v0.t
-; RV64-NEXT: vor.vv v8, v16, v8, v0.t
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: .cfi_def_cfa sp, 16
-; RV64-NEXT: addi sp, sp, 16
-; RV64-NEXT: ret
%res = call <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
ret <16 x i64> %res
}
>From 5d07763edec7677a0b28935fea57976fb02f337a Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Tue, 6 Feb 2024 19:43:57 -0800
Subject: [PATCH 05/10] fixup! [RISCV] RISCV vector calling convention (1/2)
---
clang/include/clang/Basic/Attr.td | 4 ++-
.../RISCV/riscv-vector-callingconv-llvm-ir.c | 11 +++++++
.../riscv-vector-callingconv-llvm-ir.cpp | 32 +++++++++++++++++++
.../CodeGen/RISCV/riscv-vector-callingconv.c | 17 ++++++++++
.../RISCV/riscv-vector-callingconv.cpp | 17 ++++++++++
5 files changed, 80 insertions(+), 1 deletion(-)
create mode 100644 clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
create mode 100644 clang/test/CodeGen/RISCV/riscv-vector-callingconv.c
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index eb9e7cd3075d04..3bb4b88b6e0751 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -2981,7 +2981,9 @@ def PreserveNone : DeclOrTypeAttr, TargetSpecificAttr<TargetAnyX86> {
}
def RISCVVectorCC: DeclOrTypeAttr {
- let Spellings = [Clang<"riscv_vector_cc">];
+ let Spellings = [CXX11<"riscv", "vector_cc">,
+ C23<"riscv", "vector_cc">,
+ Clang<"riscv_vector_cc">];
let Documentation = [RISCVVectorCCDocs];
}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
index 27f9ee9d1e271e..072d8a863d4570 100644
--- a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
@@ -1,6 +1,8 @@
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN: -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
+// RUN: %clang_cc1 -std=c23 -triple riscv64 -target-feature +v \
+// RUN: -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
#include <riscv_vector.h>
@@ -13,6 +15,15 @@ vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
return ret;
}
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @bar
+[[riscv::vector_cc]] vint32m1_t bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr2(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
// CHECK-LLVM: call <vscale x 2 x i32> @baz
vint32m1_t baz(vint32m1_t input);
vint32m1_t test_no_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
new file mode 100644
index 00000000000000..c01aeb21f67571
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
@@ -0,0 +1,32 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -std=c++11 -triple riscv64 -target-feature +v \
+// RUN: -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
+
+#include <riscv_vector.h>
+
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @_Z3baru15__rvv_int32m1_t
+vint32m1_t __attribute__((riscv_vector_cc)) bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @_Z3baru15__rvv_int32m1_t
+[[riscv::vector_cc]] vint32m1_t bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr2(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
+// CHECK-LLVM: call <vscale x 2 x i32> @_Z3bazu15__rvv_int32m1_t
+vint32m1_t baz(vint32m1_t input);
+vint32m1_t test_no_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = baz(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.c
new file mode 100644
index 00000000000000..5c35901799b427
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.c
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 %s -std=c23 -triple riscv64 -target-feature +v -verify
+
+__attribute__((riscv_vector_cc)) int var; // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'int'}}
+
+__attribute__((riscv_vector_cc)) void func();
+__attribute__((riscv_vector_cc(1))) void func_invalid(); // expected-error {{'riscv_vector_cc' attribute takes no arguments}}
+
+void test_no_attribute(int); // expected-note {{previous declaration is here}}
+void __attribute__((riscv_vector_cc)) test_no_attribute(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
+
+[[riscv::vector_cc]] int var2; // expected-warning {{'vector_cc' only applies to function types; type here is 'int'}}
+
+[[riscv::vector_cc]] void func2();
+[[riscv::vector_cc(1)]] void func_invalid2(); // expected-error {{'vector_cc' attribute takes no arguments}}
+
+void test_no_attribute2(int); // expected-note {{previous declaration is here}}
+[[riscv::vector_cc]] void test_no_attribute2(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
index a187eb887943fb..264bb7d9ad7c00 100644
--- a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
@@ -16,3 +16,20 @@ void test_lambda() {
__attribute__((riscv_vector_cc)) auto lambda = []() { // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'auto'}}
};
}
+
+[[riscv::vector_cc]] int var2; // expected-warning {{'vector_cc' only applies to function types; type here is 'int'}}
+
+[[riscv::vector_cc]] void func2();
+[[riscv::vector_cc(1)]] void func_invalid2(); // expected-error {{'vector_cc' attribute takes no arguments}}
+
+void test_no_attribute2(int); // expected-note {{previous declaration is here}}
+[[riscv::vector_cc]] void test_no_attribute2(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
+
+class test_cc2 {
+ [[riscv::vector_cc]] void member_func();
+};
+
+void test_lambda2() {
+ [[riscv::vector_cc]] auto lambda = []() { // expected-warning {{'vector_cc' only applies to function types; type here is 'auto'}}
+ };
+}
>From 6ad5f48bd04b4e723fc84ef6e261f3fe2f1b17a1 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Sat, 2 Mar 2024 07:24:28 -0800
Subject: [PATCH 06/10] fixup! [RISCV] RISCV vector calling convention (1/2)
---
clang/include/clang-c/Index.h | 1 +
clang/lib/CodeGen/CGDebugInfo.cpp | 2 ++
clang/tools/libclang/CXType.cpp | 1 +
llvm/include/llvm/BinaryFormat/Dwarf.def | 1 +
4 files changed, 5 insertions(+)
diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h
index 3f3620609b6ddc..ff249c8b3cff39 100644
--- a/clang/include/clang-c/Index.h
+++ b/clang/include/clang-c/Index.h
@@ -2991,6 +2991,7 @@ enum CXCallingConv {
CXCallingConv_AArch64SVEPCS = 18,
CXCallingConv_M68kRTD = 19,
CXCallingConv_PreserveNone = 20,
+ CXCallingConv_RISCVVectorCall = 21,
CXCallingConv_Invalid = 100,
CXCallingConv_Unexposed = 200
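
A small, hypothetical libclang client showing where the new enumerator surfaces; the query function itself is existing API, extended in the ``CXType.cpp`` hunk below:

  #include <clang-c/Index.h>

  // Returns true if the function type carries the RISC-V vector calling
  // convention exposed by this patch.
  static bool usesRISCVVectorCall(CXType fnType) {
    return clang_getFunctionTypeCallingConv(fnType) ==
           CXCallingConv_RISCVVectorCall;
  }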
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index c2c01439f2dc99..d00345a31f815d 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -1452,6 +1452,8 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_M68kRTD;
case CC_PreserveNone:
return llvm::dwarf::DW_CC_LLVM_PreserveNone;
+ case CC_RISCVVectorCall:
+ return llvm::dwarf::DW_CC_LLVM_RISCVVectorCall;
}
return 0;
}
diff --git a/clang/tools/libclang/CXType.cpp b/clang/tools/libclang/CXType.cpp
index 292d524f00abd6..991767dc4c49c6 100644
--- a/clang/tools/libclang/CXType.cpp
+++ b/clang/tools/libclang/CXType.cpp
@@ -680,6 +680,7 @@ CXCallingConv clang_getFunctionTypeCallingConv(CXType X) {
TCALLINGCONV(PreserveAll);
TCALLINGCONV(M68kRTD);
TCALLINGCONV(PreserveNone);
+ TCALLINGCONV(RISCVVectorCall);
case CC_SpirFunction: return CXCallingConv_Unexposed;
case CC_AMDGPUKernelCall: return CXCallingConv_Unexposed;
case CC_OpenCLKernel: return CXCallingConv_Unexposed;
diff --git a/llvm/include/llvm/BinaryFormat/Dwarf.def b/llvm/include/llvm/BinaryFormat/Dwarf.def
index e70b58d5ea50fc..d8927c6202fd57 100644
--- a/llvm/include/llvm/BinaryFormat/Dwarf.def
+++ b/llvm/include/llvm/BinaryFormat/Dwarf.def
@@ -1040,6 +1040,7 @@ HANDLE_DW_CC(0xca, LLVM_PreserveAll)
HANDLE_DW_CC(0xcb, LLVM_X86RegCall)
HANDLE_DW_CC(0xcc, LLVM_M68kRTD)
HANDLE_DW_CC(0xcd, LLVM_PreserveNone)
+HANDLE_DW_CC(0xce, LLVM_RISCVVectorCall)
// From GCC source code (include/dwarf2.h): This DW_CC_ value is not currently
// generated by any toolchain. It is used internally to GDB to indicate OpenCL
// C functions that have been compiled with the IBM XL C for OpenCL compiler and
>From 2128c8704cc20b32403bce059422cc0eba33efb9 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Sat, 2 Mar 2024 07:37:09 -0800
Subject: [PATCH 07/10] fixup! [RISCV] RISCV vector calling convention (1/2)
---
clang/include/clang/Basic/Attr.td | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 3bb4b88b6e0751..f31817e4bea3b8 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -2980,7 +2980,7 @@ def PreserveNone : DeclOrTypeAttr, TargetSpecificAttr<TargetAnyX86> {
let Documentation = [PreserveNoneDocs];
}
-def RISCVVectorCC: DeclOrTypeAttr {
+def RISCVVectorCC: DeclOrTypeAttr, TargetSpecificAttr<TargetRISCV> {
let Spellings = [CXX11<"riscv", "vector_cc">,
C23<"riscv", "vector_cc">,
Clang<"riscv_vector_cc">];
>From 9bf8ab8169b6ce7c45fa3a3688ca914b7d33ebb1 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Sat, 2 Mar 2024 07:38:32 -0800
Subject: [PATCH 08/10] fixup! [RISCV] Add CFI information for vector
callee-saved registers
---
llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 26 +++++++++-----------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 946ab37eb36062..23130d79ede0fa 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -478,9 +478,9 @@ static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
else
Comment << printReg(Reg, &TRI);
- appendScalableVectorExpression(
- Expr, FixedOffset, ScalableOffset,
- TRI.getDwarfRegNum(RISCV::VLENB, true), Comment);
+ appendScalableVectorExpression(Expr, FixedOffset, ScalableOffset,
+ TRI.getDwarfRegNum(RISCV::VLENB, true),
+ Comment);
SmallString<64> DefCfaExpr;
uint8_t buffer[16];
@@ -493,8 +493,7 @@ static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
}
static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
- Register Reg,
- uint64_t FixedOffset,
+ Register Reg, uint64_t FixedOffset,
uint64_t ScalableOffset) {
assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
SmallString<64> Expr;
@@ -503,9 +502,9 @@ static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
Comment << printReg(Reg, &TRI) << " @ cfa";
// Build up the expression (FixedOffset + ScalableOffset * VLENB).
- appendScalableVectorExpression(
- Expr, FixedOffset, ScalableOffset,
- TRI.getDwarfRegNum(RISCV::VLENB, true), Comment);
+ appendScalableVectorExpression(Expr, FixedOffset, ScalableOffset,
+ TRI.getDwarfRegNum(RISCV::VLENB, true),
+ Comment);
SmallString<64> DefCfaExpr;
uint8_t buffer[16];
@@ -1593,8 +1592,7 @@ void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
return;
uint64_t FixedSize = getStackSizeWithRVVPadding(*MF) +
- RVFI->getLibCallStackSize() +
- RVFI->getRVPushStackSize();
+ RVFI->getLibCallStackSize() + RVFI->getRVPushStackSize();
if (!HasFP) {
uint64_t ScalarLocalVarSize =
MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
@@ -1603,13 +1601,13 @@ void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
FixedSize -= ScalarLocalVarSize;
}
- for (auto &CS: RVVCSI) {
+ for (auto &CS : RVVCSI) {
// Insert the spill to the stack frame.
int FI = CS.getFrameIdx();
if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector) {
- unsigned CFIIndex = MF->addFrameInst(createDefCFAOffset(
- *STI.getRegisterInfo(), CS.getReg(),
- -FixedSize, MFI.getObjectOffset(FI) / 8));
+ unsigned CFIIndex = MF->addFrameInst(
+ createDefCFAOffset(*STI.getRegisterInfo(), CS.getReg(), -FixedSize,
+ MFI.getObjectOffset(FI) / 8));
BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlag(MachineInstr::FrameSetup);
>From e9f607cab34e1c26518c1680e51fb9b2382d8f6f Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Sat, 2 Mar 2024 07:39:24 -0800
Subject: [PATCH 09/10] fixup! [RISCV] RISCV vector calling convention (1/2)
---
clang/include/clang/Basic/Specifiers.h | 44 ++++++++++----------
clang/lib/AST/Type.cpp | 3 +-
clang/lib/Basic/Targets/RISCV.cpp | 10 ++---
clang/lib/CodeGen/CGCall.cpp | 3 +-
llvm/lib/AsmParser/LLParser.cpp | 4 +-
llvm/lib/IR/AsmWriter.cpp | 4 +-
llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 11 ++---
7 files changed, 43 insertions(+), 36 deletions(-)
diff --git a/clang/include/clang/Basic/Specifiers.h b/clang/include/clang/Basic/Specifiers.h
index 6c23fdb7a1dff1..fb11e8212f8b68 100644
--- a/clang/include/clang/Basic/Specifiers.h
+++ b/clang/include/clang/Basic/Specifiers.h
@@ -273,30 +273,30 @@ namespace clang {
/// CallingConv - Specifies the calling convention that a function uses.
enum CallingConv {
- CC_C, // __attribute__((cdecl))
- CC_X86StdCall, // __attribute__((stdcall))
- CC_X86FastCall, // __attribute__((fastcall))
- CC_X86ThisCall, // __attribute__((thiscall))
- CC_X86VectorCall, // __attribute__((vectorcall))
- CC_X86Pascal, // __attribute__((pascal))
- CC_Win64, // __attribute__((ms_abi))
- CC_X86_64SysV, // __attribute__((sysv_abi))
- CC_X86RegCall, // __attribute__((regcall))
- CC_AAPCS, // __attribute__((pcs("aapcs")))
- CC_AAPCS_VFP, // __attribute__((pcs("aapcs-vfp")))
- CC_IntelOclBicc, // __attribute__((intel_ocl_bicc))
- CC_SpirFunction, // default for OpenCL functions on SPIR target
- CC_OpenCLKernel, // inferred for OpenCL kernels
- CC_Swift, // __attribute__((swiftcall))
+ CC_C, // __attribute__((cdecl))
+ CC_X86StdCall, // __attribute__((stdcall))
+ CC_X86FastCall, // __attribute__((fastcall))
+ CC_X86ThisCall, // __attribute__((thiscall))
+ CC_X86VectorCall, // __attribute__((vectorcall))
+ CC_X86Pascal, // __attribute__((pascal))
+ CC_Win64, // __attribute__((ms_abi))
+ CC_X86_64SysV, // __attribute__((sysv_abi))
+ CC_X86RegCall, // __attribute__((regcall))
+ CC_AAPCS, // __attribute__((pcs("aapcs")))
+ CC_AAPCS_VFP, // __attribute__((pcs("aapcs-vfp")))
+ CC_IntelOclBicc, // __attribute__((intel_ocl_bicc))
+ CC_SpirFunction, // default for OpenCL functions on SPIR target
+ CC_OpenCLKernel, // inferred for OpenCL kernels
+ CC_Swift, // __attribute__((swiftcall))
CC_SwiftAsync, // __attribute__((swiftasynccall))
- CC_PreserveMost, // __attribute__((preserve_most))
- CC_PreserveAll, // __attribute__((preserve_all))
+ CC_PreserveMost, // __attribute__((preserve_most))
+ CC_PreserveAll, // __attribute__((preserve_all))
CC_AArch64VectorCall, // __attribute__((aarch64_vector_pcs))
- CC_AArch64SVEPCS, // __attribute__((aarch64_sve_pcs))
- CC_AMDGPUKernelCall, // __attribute__((amdgpu_kernel))
- CC_M68kRTD, // __attribute__((m68k_rtd))
- CC_PreserveNone, // __attribute__((preserve_none))
- CC_RISCVVectorCall, // __attribute__((riscv_vector_cc))
+ CC_AArch64SVEPCS, // __attribute__((aarch64_sve_pcs))
+ CC_AMDGPUKernelCall, // __attribute__((amdgpu_kernel))
+ CC_M68kRTD, // __attribute__((m68k_rtd))
+ CC_PreserveNone, // __attribute__((preserve_none))
+ CC_RISCVVectorCall, // __attribute__((riscv_vector_cc))
};
/// Checks whether the given calling convention supports variadic
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 0fc18d44ffd802..979f754e286e10 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -3439,7 +3439,8 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_PreserveAll: return "preserve_all";
case CC_M68kRTD: return "m68k_rtd";
case CC_PreserveNone: return "preserve_none";
- case CC_RISCVVectorCall: return "riscv_vector_cc";
+ case CC_RISCVVectorCall:
+ return "riscv_vector_cc";
}
llvm_unreachable("Invalid calling convention.");
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 91cf6c9b49222b..f3d705e1551fe2 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -471,10 +471,10 @@ ParsedTargetAttr RISCVTargetInfo::parseTargetAttr(StringRef Features) const {
TargetInfo::CallingConvCheckResult
RISCVTargetInfo::checkCallingConvention(CallingConv CC) const {
switch (CC) {
- default:
- return CCCR_Warning;
- case CC_C:
- case CC_RISCVVectorCall:
- return CCCR_OK;
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_RISCVVectorCall:
+ return CCCR_OK;
}
}
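
The practical effect of ``checkCallingConvention`` here: on RISC-V targets, only the C default and ``riscv_vector_cc`` are accepted, while anything else falls into the ``CCCR_Warning`` path and is ignored with a diagnostic. A hedged illustration (the warning text is clang's generic one, quoted from memory):

  __attribute__((ms_abi)) void f();          // warning: calling convention ignored
  __attribute__((riscv_vector_cc)) void g(); // accepted after this patch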
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index aafdff3e7e53b0..2e1911ebdc5b9f 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -74,7 +74,8 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
case CC_PreserveNone: return llvm::CallingConv::PreserveNone;
- case CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall;
+ case CC_RISCVVectorCall:
+ return llvm::CallingConv::RISCV_VectorCall;
}
}
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 8fcc5670810ad2..9e7a74ba583a7e 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -2201,7 +2201,9 @@ bool LLParser::parseOptionalCallingConv(unsigned &CC) {
case lltok::kw_tailcc: CC = CallingConv::Tail; break;
case lltok::kw_m68k_rtdcc: CC = CallingConv::M68k_RTD; break;
case lltok::kw_graalcc: CC = CallingConv::GRAAL; break;
- case lltok::kw_riscv_vector_cc:CC = CallingConv::RISCV_VectorCall; break;
+ case lltok::kw_riscv_vector_cc:
+ CC = CallingConv::RISCV_VectorCall;
+ break;
case lltok::kw_cc: {
Lex.Lex();
return parseUInt32(CC);
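
A quick way to see the new keyword round-trip through the textual IR parser and printer changed in this hunk and the next; a sketch using stock LLVM APIs, with error handling elided:

  #include "llvm/AsmParser/Parser.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/SourceMgr.h"
  #include "llvm/Support/raw_ostream.h"
  #include <memory>

  int main() {
    llvm::LLVMContext Ctx;
    llvm::SMDiagnostic Err;
    std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(
        "declare riscv_vector_cc <vscale x 2 x i32> @bar(<vscale x 2 x i32>)\n",
        Err, Ctx);
    if (M)
      M->print(llvm::outs(), nullptr); // prints `riscv_vector_cc` back out
    return 0;
  }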
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 2e5332dfa3f71e..c143e0d9d83038 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -363,7 +363,9 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::AMDGPU_KERNEL: Out << "amdgpu_kernel"; break;
case CallingConv::AMDGPU_Gfx: Out << "amdgpu_gfx"; break;
case CallingConv::M68k_RTD: Out << "m68k_rtdcc"; break;
- case CallingConv::RISCV_VectorCall: Out << "riscv_vector_cc"; break;
+ case CallingConv::RISCV_VectorCall:
+ Out << "riscv_vector_cc";
+ break;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 23130d79ede0fa..a53ab532a9d363 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1565,12 +1565,12 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
auto storeRegToStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
- for (auto &CS: CSInfo) {
+ for (auto &CS : CSInfo) {
// Insert the spill to the stack frame.
Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg), CS.getFrameIdx(),
- RC, TRI, Register());
+ TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg),
+ CS.getFrameIdx(), RC, TRI, Register());
}
};
storeRegToStackSlot(UnmanagedCSI);
@@ -1658,12 +1658,13 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
- for (auto &CS: CSInfo) {
+ for (auto &CS : CSInfo) {
Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
Register());
- assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
+ assert(MI != MBB.begin() &&
+ "loadRegFromStackSlot didn't insert any code!");
}
};
loadRegFromStackSlot(RVVCSI);
>From 7af75ebfedf5307a5a1f39b95995a37683545255 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Sat, 2 Mar 2024 07:49:01 -0800
Subject: [PATCH 10/10] fixup! [RISCV] RISCV vector calling convention (1/2)
---
clang/include/clang/Basic/AttrDocs.td | 1 +
1 file changed, 1 insertion(+)
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index 8a0df89a3615d5..1cd4afa38be7ab 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -5436,6 +5436,7 @@ for clang builtin functions.
def RISCVVectorCCDocs : Documentation {
let Category = DocCatCallingConvs;
+ let Heading = "riscv::vector_cc, riscv_vector_cc, clang::riscv_vector_cc";
let Content = [{
The ``riscv_vector_cc`` attribute can be applied to a function. It preserves 15
registers, namely v1-v7 and v24-v31, as callee-saved. Callers thus don't need