[llvm] [RISCV][GISEL] Add vector RegisterBanks and vector support in getRegBankFromRegClass (PR #71541)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 9 14:26:42 PST 2023


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/71541

>From 4f454e2f4a557c7daa335fb1692befe93bb50929 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 06:25:02 -0800
Subject: [PATCH 1/7] [RISCV] Use TypeSize in places where needed for
 RegBankSelection

---
 llvm/include/llvm/CodeGen/RegisterBankInfo.h | 4 ++--
 llvm/lib/CodeGen/MachineVerifier.cpp         | 4 ++--
 llvm/lib/CodeGen/RegisterBankInfo.cpp        | 9 +++++----
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/RegisterBankInfo.h b/llvm/include/llvm/CodeGen/RegisterBankInfo.h
index 1ee1f6b6c32ed63..b353ea8b3cc86ec 100644
--- a/llvm/include/llvm/CodeGen/RegisterBankInfo.h
+++ b/llvm/include/llvm/CodeGen/RegisterBankInfo.h
@@ -177,7 +177,7 @@ class RegisterBankInfo {
     /// \note This method does not check anything when assertions are disabled.
     ///
     /// \return True is the check was successful.
-    bool verify(const RegisterBankInfo &RBI, unsigned MeaningfulBitWidth) const;
+    bool verify(const RegisterBankInfo &RBI, TypeSize MeaningfulBitWidth) const;
 
     /// Print this on dbgs() stream.
     void dump() const;
@@ -749,7 +749,7 @@ class RegisterBankInfo {
   /// virtual register.
   ///
   /// \pre \p Reg != 0 (NoRegister).
-  unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
+  TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
                          const TargetRegisterInfo &TRI) const;
 
   /// Check that information hold by this instance make sense for the
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index da1d9c6f0679c7f..8b97a4a1b93750d 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -2262,8 +2262,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
           }
 
           // Make sure the register fits into its register bank if any.
-          if (RegBank && Ty.isValid() &&
-              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
+          if (RegBank && Ty.isValid() && (!Ty.isScalable() &&
+              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits())) {
             report("Register bank is too small for virtual register", MO,
                    MONum);
             errs() << "Register bank " << RegBank->getName() << " too small("
diff --git a/llvm/lib/CodeGen/RegisterBankInfo.cpp b/llvm/lib/CodeGen/RegisterBankInfo.cpp
index f9721d7d9386958..6a96bb40f56aed9 100644
--- a/llvm/lib/CodeGen/RegisterBankInfo.cpp
+++ b/llvm/lib/CodeGen/RegisterBankInfo.cpp
@@ -495,7 +495,7 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
   }
 }
 
-unsigned RegisterBankInfo::getSizeInBits(Register Reg,
+TypeSize RegisterBankInfo::getSizeInBits(Register Reg,
                                          const MachineRegisterInfo &MRI,
                                          const TargetRegisterInfo &TRI) const {
   if (Reg.isPhysical()) {
@@ -553,7 +553,7 @@ bool RegisterBankInfo::ValueMapping::partsAllUniform() const {
 }
 
 bool RegisterBankInfo::ValueMapping::verify(const RegisterBankInfo &RBI,
-                                            unsigned MeaningfulBitWidth) const {
+                                            TypeSize MeaningfulBitWidth) const {
   assert(NumBreakDowns && "Value mapped nowhere?!");
   unsigned OrigValueBitWidth = 0;
   for (const RegisterBankInfo::PartialMapping &PartMap : *this) {
@@ -565,8 +565,9 @@ bool RegisterBankInfo::ValueMapping::verify(const RegisterBankInfo &RBI,
     OrigValueBitWidth =
         std::max(OrigValueBitWidth, PartMap.getHighBitIdx() + 1);
   }
-  assert(OrigValueBitWidth >= MeaningfulBitWidth &&
-         "Meaningful bits not covered by the mapping");
+  assert(MeaningfulBitWidth.isScalable() ||
+         OrigValueBitWidth >= MeaningfulBitWidth &&
+             "Meaningful bits not covered by the mapping");
   APInt ValueMask(OrigValueBitWidth, 0);
   for (const RegisterBankInfo::PartialMapping &PartMap : *this) {
     // Check that the union of the partial mappings covers the whole value,

>From feb2257318ceee082390b678a9425f4fe2860aa4 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 07:29:35 -0800
Subject: [PATCH 2/7] [RISCV][GISEL] Add support for scalable vector types in
 lowerReturnVal

Scalable vector types from LLVM IR are lowered into physical vector
registers in MIR based on the calling convention.

This patch is stacked on #70881.
---
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |   2 +-
 llvm/lib/CodeGen/LowLevelType.cpp             |   2 +-
 llvm/lib/CodeGen/MachineVerifier.cpp          |   3 +
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  |  25 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   7 +-
 .../irtranslator/vec-ret-bf16-err.ll          |  14 +
 .../irtranslator/vec-ret-f16-err.ll           |  14 +
 .../RISCV/GlobalISel/irtranslator/vec-ret.ll  | 809 ++++++++++++++++++
 8 files changed, 872 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll

diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 178f1fc9313d71b..d7ea25838058e4b 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -359,7 +359,7 @@ bool IRTranslator::translateCompare(const User &U,
 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
   const ReturnInst &RI = cast<ReturnInst>(U);
   const Value *Ret = RI.getReturnValue();
-  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
+  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
     Ret = nullptr;
 
   ArrayRef<Register> VRegs;
diff --git a/llvm/lib/CodeGen/LowLevelType.cpp b/llvm/lib/CodeGen/LowLevelType.cpp
index 24c30b756737b20..cd85bf606989f9e 100644
--- a/llvm/lib/CodeGen/LowLevelType.cpp
+++ b/llvm/lib/CodeGen/LowLevelType.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
 
 LLT::LLT(MVT VT) {
   if (VT.isVector()) {
-    bool asVector = VT.getVectorMinNumElements() > 1;
+    bool asVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
     init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
          VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
          /*AddressSpace=*/0);
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 8b97a4a1b93750d..98ff7df0eb84ccc 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1966,6 +1966,9 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
         !DstSize.isScalable())
       break;
+    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
+        !DstSize.isScalable())
+      break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index a1dbc21ca364666..1edcb34539a49d9 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -301,6 +301,27 @@ struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+/// Return true if scalable vector with ScalarTy is legal for lowering.
+static bool isLegalElementTypeForRVV(Type *EltTy,
+                                     const RISCVSubtarget &Subtarget) {
+  if (EltTy->isPointerTy())
+    return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
+  if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
+      EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
+    return true;
+  if (EltTy->isIntegerTy(64))
+    return Subtarget.hasVInstructionsI64();
+  if (EltTy->isHalfTy())
+    return Subtarget.hasVInstructionsF16();
+  if (EltTy->isBFloatTy())
+    return Subtarget.hasVInstructionsBF16();
+  if (EltTy->isFloatTy())
+    return Subtarget.hasVInstructionsF32();
+  if (EltTy->isDoubleTy())
+    return Subtarget.hasVInstructionsF64();
+  return false;
+}
+
 // TODO: Support all argument types.
 static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget) {
   // TODO: Integers larger than 2*XLen are passed indirectly which is not
@@ -336,6 +357,10 @@ static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget) {
     return true;
   }
 
+  if (T->isVectorTy() && Subtarget.hasVInstructions() && T->isScalableTy() &&
+      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
+    return true;
+
   return false;
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8d13563eb138150..3ec22c7579fae4b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19638,12 +19638,15 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
 }
 
 bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
-  // We don't support scalable vectors in GISel.
+  // At the moment, the only scalable instruction GISel knows how to lower is
+  // ret with scalable argument.
+
   if (Inst.getType()->isScalableTy())
     return true;
 
   for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
-    if (Inst.getOperand(i)->getType()->isScalableTy())
+    if (Inst.getOperand(i)->getType()->isScalableTy() &&
+        !isa<ReturnInst>(&Inst))
       return true;
 
   if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
new file mode 100644
index 000000000000000..c968d0726317f19
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
@@ -0,0 +1,14 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for bf16 vectors. If the compiler did not throw an error,
+; then it will try to scalarize the argument to an s32, which may drop elements.
+define <vscale x 1 x bfloat> @test_ret_nxv1bf16() {
+entry:
+  ret <vscale x 1 x bfloat> undef
+}
+
+; CHECK: LLVM ERROR: unable to translate instruction: ret (in function: test_ret_nxv1bf16)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
new file mode 100644
index 000000000000000..f87ca94ceb4f103
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
@@ -0,0 +1,14 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for f16 vectors. If the compiler did not throw an error,
+; then it will try to scalarize the argument to an s32, which may drop elements.
+define <vscale x 1 x half> @test_ret_nxv1f16() {
+entry:
+  ret <vscale x 1 x half> undef
+}
+
+; CHECK: LLVM ERROR: unable to translate instruction: ret (in function: test_ret_nxv1f16)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
new file mode 100644
index 000000000000000..eec9969063c87a5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
@@ -0,0 +1,809 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+; ==========================================================================
+; ============================= Scalable Types =============================
+; ==========================================================================
+
+define <vscale x 1 x i8> @test_ret_nxv1i8() {
+  ; RV32-LABEL: name: test_ret_nxv1i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i8> undef
+}
+
+define <vscale x 2 x i8> @test_ret_nxv2i8() {
+  ; RV32-LABEL: name: test_ret_nxv2i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i8> undef
+}
+
+define <vscale x 4 x i8> @test_ret_nxv4i8() {
+  ; RV32-LABEL: name: test_ret_nxv4i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i8> undef
+}
+
+define <vscale x 8 x i8> @test_ret_nxv8i8() {
+  ; RV32-LABEL: name: test_ret_nxv8i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 8 x i8> undef
+}
+
+define <vscale x 16 x i8> @test_ret_nxv16i8() {
+  ; RV32-LABEL: name: test_ret_nxv16i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 16 x i8> undef
+}
+
+define <vscale x 32 x i8> @test_ret_nxv32i8() {
+  ; RV32-LABEL: name: test_ret_nxv32i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 32 x i8> undef
+}
+
+define <vscale x 64 x i8> @test_ret_nxv64i8() {
+  ; RV32-LABEL: name: test_ret_nxv64i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv64i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 64 x i8> undef
+}
+
+define <vscale x 1 x i16> @test_ret_nxv1i16() {
+  ; RV32-LABEL: name: test_ret_nxv1i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i16> undef
+}
+
+define <vscale x 2 x i16> @test_ret_nxv2i16() {
+  ; RV32-LABEL: name: test_ret_nxv2i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i16> undef
+}
+
+define <vscale x 4 x i16> @test_ret_nxv4i16() {
+  ; RV32-LABEL: name: test_ret_nxv4i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i16> undef
+}
+
+define <vscale x 8 x i16> @test_ret_nxv8i16() {
+  ; RV32-LABEL: name: test_ret_nxv8i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 8 x i16> undef
+}
+
+define <vscale x 16 x i16> @test_ret_nxv16i16() {
+  ; RV32-LABEL: name: test_ret_nxv16i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 16 x i16> undef
+}
+
+define <vscale x 32 x i16> @test_ret_nxv32i16() {
+  ; RV32-LABEL: name: test_ret_nxv32i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 32 x i16> undef
+}
+
+define <vscale x 1 x i32> @test_ret_nxv1i32() {
+  ; RV32-LABEL: name: test_ret_nxv1i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i32> undef
+}
+
+define <vscale x 2 x i32> @test_ret_nxv2i32() {
+  ; RV32-LABEL: name: test_ret_nxv2i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i32> undef
+}
+
+define <vscale x 4 x i32> @test_ret_nxv4i32() {
+  ; RV32-LABEL: name: test_ret_nxv4i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 4 x i32> undef
+}
+
+define <vscale x 8 x i32> @test_ret_nxv8i32() {
+  ; RV32-LABEL: name: test_ret_nxv8i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 8 x i32> undef
+}
+
+define <vscale x 16 x i32> @test_ret_nxv16i32() {
+  ; RV32-LABEL: name: test_ret_nxv16i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 16 x i32> undef
+}
+
+define <vscale x 1 x i64> @test_ret_nxv1i64() {
+  ; RV32-LABEL: name: test_ret_nxv1i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i64> undef
+}
+
+define <vscale x 2 x i64> @test_ret_nxv2i64() {
+  ; RV32-LABEL: name: test_ret_nxv2i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 2 x i64> undef
+}
+
+define <vscale x 4 x i64> @test_ret_nxv4i64() {
+  ; RV32-LABEL: name: test_ret_nxv4i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 4 x i64> undef
+}
+
+define <vscale x 8 x i64> @test_ret_nxv8i64() {
+  ; RV32-LABEL: name: test_ret_nxv8i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 8 x i64> undef
+}
+
+define <vscale x 64 x i1> @test_ret_nxv64i1() {
+  ; RV32-LABEL: name: test_ret_nxv64i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv64i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 64 x i1> undef
+}
+
+define <vscale x 32 x i1> @test_ret_nxv32i1() {
+  ; RV32-LABEL: name: test_ret_nxv32i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 32 x i1> undef
+}
+
+define <vscale x 16 x i1> @test_ret_nxv16i1() {
+  ; RV32-LABEL: name: test_ret_nxv16i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 16 x i1> undef
+}
+
+define <vscale x 8 x i1> @test_ret_nxv8i1() {
+  ; RV32-LABEL: name: test_ret_nxv8i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 8 x i1> undef
+}
+
+define <vscale x 4 x i1> @test_ret_nxv4i1() {
+  ; RV32-LABEL: name: test_ret_nxv4i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i1> undef
+}
+
+define <vscale x 2 x i1> @test_ret_nxv2i1() {
+  ; RV32-LABEL: name: test_ret_nxv2i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i1> undef
+}
+
+define <vscale x 1 x i1> @test_ret_nxv1i1() {
+  ; RV32-LABEL: name: test_ret_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i1> undef
+}
+
+define <vscale x 1 x float> @test_ret_nxv1f32() {
+  ; RV32-LABEL: name: test_ret_nxv1f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x float> undef
+}
+
+define <vscale x 2 x float> @test_ret_nxv2f32() {
+  ; RV32-LABEL: name: test_ret_nxv2f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x float> undef
+}
+
+define <vscale x 4 x float> @test_ret_nxv4f32() {
+  ; RV32-LABEL: name: test_ret_nxv4f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 4 x float> undef
+}
+
+define <vscale x 8 x float> @test_ret_nxv8f32() {
+  ; RV32-LABEL: name: test_ret_nxv8f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 8 x float> undef
+}
+
+define <vscale x 16 x float> @test_ret_nxv16f32() {
+  ; RV32-LABEL: name: test_ret_nxv16f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 16 x float> undef
+}
+
+define <vscale x 1 x double> @test_ret_nxv1f64() {
+  ; RV32-LABEL: name: test_ret_nxv1f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x double> undef
+}
+
+define <vscale x 2 x double> @test_ret_nxv2f64() {
+  ; RV32-LABEL: name: test_ret_nxv2f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 2 x double> undef
+}
+
+define <vscale x 4 x double> @test_ret_nxv4f64() {
+  ; RV32-LABEL: name: test_ret_nxv4f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 4 x double> undef
+}
+
+define <vscale x 8 x double> @test_ret_nxv8f64() {
+  ; RV32-LABEL: name: test_ret_nxv8f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 8 x double> undef
+}
+
+define <vscale x 1 x half> @test_ret_nxv1f16() {
+  ; RV32-LABEL: name: test_ret_nxv1f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x half> undef
+}
+
+define <vscale x 2 x half> @test_ret_nxv2f16() {
+  ; RV32-LABEL: name: test_ret_nxv2f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x half> undef
+}
+
+define <vscale x 4 x half> @test_ret_nxv4f16() {
+  ; RV32-LABEL: name: test_ret_nxv4f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x half> undef
+}
+
+define <vscale x 8 x half> @test_ret_nxv8f16() {
+  ; RV32-LABEL: name: test_ret_nxv8f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 8 x half> undef
+}
+
+define <vscale x 16 x half> @test_ret_nxv16f16() {
+  ; RV32-LABEL: name: test_ret_nxv16f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 16 x half> undef
+}
+
+define <vscale x 32 x half> @test_ret_nxv32f16() {
+  ; RV32-LABEL: name: test_ret_nxv32f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 32 x half> undef
+}
+
+define <vscale x 1 x bfloat> @test_ret_nxv1b16() {
+  ; RV32-LABEL: name: test_ret_nxv1b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x bfloat> undef
+}
+
+define <vscale x 2 x bfloat> @test_ret_nxv2b16() {
+  ; RV32-LABEL: name: test_ret_nxv2b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x bfloat> undef
+}
+
+define <vscale x 4 x bfloat> @test_ret_nxv4b16() {
+  ; RV32-LABEL: name: test_ret_nxv4b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x bfloat> undef
+}
+
+define <vscale x 8 x bfloat> @test_ret_nxv8b16() {
+  ; RV32-LABEL: name: test_ret_nxv8b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 8 x bfloat> undef
+}
+
+define <vscale x 16 x bfloat> @test_ret_nxv16b16() {
+  ; RV32-LABEL: name: test_ret_nxv16b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 16 x bfloat> undef
+}
+
+define <vscale x 32 x bfloat> @test_ret_nxv32b16() {
+  ; RV32-LABEL: name: test_ret_nxv32b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 32 x bfloat> undef
+}

>From 388cf34712e89bb81fbde7d655f55d746aff91f5 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 06:25:13 -0800
Subject: [PATCH 3/7] [RISCV][GISEL] Add vector RegisterBanks and vector
 support in getRegBankFromRegClass

Vector register banks are created for the various vector register
groupings. getRegBankFromRegClass is implemented to map a vector
TargetRegisterClass to the corresponding vector RegisterBank.
---
 llvm/lib/CodeGen/MachineVerifier.cpp          |    4 +-
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |   20 +
 .../Target/RISCV/GISel/RISCVRegisterBanks.td  |   12 +
 .../GlobalISel/regbankselect/vec-args.mir     | 1058 +++++++++++++++++
 4 files changed, 1092 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 98ff7df0eb84ccc..7d8d47620089f43 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -2265,8 +2265,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
           }
 
           // Make sure the register fits into its register bank if any.
-          if (RegBank && Ty.isValid() && (!Ty.isScalable() &&
-              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits())) {
+          if (RegBank && Ty.isValid() && !(Ty.isVector() && Ty.isScalable()) &&
+              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
             report("Register bank is too small for virtual register", MO,
                    MONum);
             errs() << "Register bank " << RegBank->getName() << " too small("
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 44ed072b0e81b3c..75d92b005eb9c3d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -100,6 +100,26 @@ RISCVRegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
   case RISCV::FPR64CRegClassID:
   case RISCV::FPR32CRegClassID:
     return getRegBank(RISCV::FPRRegBankID);
+  case RISCV::VRRegClassID:
+    return getRegBank(RISCV::VRRegBankID);
+  case RISCV::VRNoV0RegClassID:
+    return getRegBank(RISCV::VRNoV0RegBankID);
+  case RISCV::VRM2RegClassID:
+    return getRegBank(RISCV::VRM2RegBankID);
+  case RISCV::VRM2NoV0RegClassID:
+    return getRegBank(RISCV::VRM2NoV0RegBankID);
+  case RISCV::VRM4RegClassID:
+    return getRegBank(RISCV::VRM4RegBankID);
+  case RISCV::VRM4NoV0RegClassID:
+    return getRegBank(RISCV::VRM4NoV0RegBankID);
+  case RISCV::VRM8RegClassID:
+    return getRegBank(RISCV::VRM8RegBankID);
+  case RISCV::VRM8NoV0RegClassID:
+    return getRegBank(RISCV::VRM8NoV0RegBankID);
+  case RISCV::VMRegClassID:
+    return getRegBank(RISCV::VMRegBankID);
+  case RISCV::VMV0RegClassID:
+    return getRegBank(RISCV::VMV0RegBankID);
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
index 49f18e19c2269fd..40ef98d2badde1d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
@@ -14,3 +14,15 @@ def GPRRegBank : RegisterBank<"GPRB", [GPR]>;
 
 /// Floating Point Registers: F.
 def FPRRegBank : RegisterBank<"FPRB", [FPR64]>;
+
+/// Vector Register Banks:
+def VRRegBank : RegisterBank<"VRB", [VR]>;
+def VRNoV0RegBank : RegisterBank<"VRNoV0B", [VRNoV0]>;
+def VRM2RegBank : RegisterBank<"VRM2B", [VRM2]>;
+def VRM2NoV0RegBank : RegisterBank<"VRM2NoV0B", [VRM2NoV0]>;
+def VRM4RegBank : RegisterBank<"VRM4B", [VRM4]>;
+def VRM4NoV0RegBank : RegisterBank<"VRM4NoV0B", [VRM4NoV0]>;
+def VRM8RegBank : RegisterBank<"VRM8B", [VRM8]>;
+def VRM8NoV0RegBank : RegisterBank<"VRM8NoV0B", [VRM8NoV0]>;
+def VMRegBank : RegisterBank<"VMB", [VM]>;
+def VMV0RegBank : RegisterBank<"VMNoV0B", [VMV0]>;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir
new file mode 100644
index 000000000000000..340db97f3fe01c7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir
@@ -0,0 +1,1058 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32  -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefixes=RV32 %s
+# RUN: llc -mtriple=riscv64  -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefixes=RV64 %s
+
+---
+name:            test_args_nxv1i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv2i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv4i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv4i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv8i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv8i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv16i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv16i8
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv16i8
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv32i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv32i8
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv32i8
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv64i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv64i8
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv64i8
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    PseudoRET
+...
+---
+name:            test_args_nxv1i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv2i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv4i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv4i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv8i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv8i16
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8i16
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv16i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv16i16
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv16i16
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv32i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv32i16
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv32i16
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    PseudoRET
+...
+---
+name:            test_args_nxv1i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv2i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv4i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv4i32
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4i32
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv8i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv8i32
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8i32
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv16i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv16i32
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv16i32
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    PseudoRET
+...
+---
+name:            test_args_nxv1i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv2i64
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2i64
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv4i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv4i64
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4i64
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv8i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv8i64
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8i64
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    PseudoRET
+...
+---
+name:            test_args_nxv64i1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv64i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv64i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 64 x s1>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv32i1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv32i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv32i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 32 x s1>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv16i1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv16i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv16i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 16 x s1>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv8i1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv8i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s1>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv4i1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv4i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s1>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2i1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv2i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s1>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv1i1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s1>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv1f32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2f32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv2f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv4f32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv4f32
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4f32
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv8f32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv8f32
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8f32
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv16f32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv16f32
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv16f32
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    PseudoRET
+...
+---
+name:            test_args_nxv1f64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2f64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv2f64
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2f64
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv4f64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv4f64
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4f64
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv8f64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv8f64
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8f64
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    PseudoRET
+...
+---
+name:            test_args_nxv1f16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2f16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv2f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv4f16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv4f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv8f16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv8f16
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8f16
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv16f16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv16f16
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv16f16
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv32f16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv32f16
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv32f16
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    PseudoRET
+...
+---
+name:            test_args_nxv1b16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv1b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv1b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv2b16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv2b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv2b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv4b16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_args_nxv4b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv4b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    PseudoRET
+...
+---
+name:            test_args_nxv8b16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m2
+    ; RV32-LABEL: name: test_args_nxv8b16
+    ; RV32: liveins: $v8m2
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv8b16
+    ; RV64: liveins: $v8m2
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    PseudoRET
+...
+---
+name:            test_args_nxv16b16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m4
+    ; RV32-LABEL: name: test_args_nxv16b16
+    ; RV32: liveins: $v8m4
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv16b16
+    ; RV64: liveins: $v8m4
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    PseudoRET
+...
+---
+name:            test_args_nxv32b16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8m8
+    ; RV32-LABEL: name: test_args_nxv32b16
+    ; RV32: liveins: $v8m8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV32-NEXT: PseudoRET
+    ;
+    ; RV64-LABEL: name: test_args_nxv32b16
+    ; RV64: liveins: $v8m8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV64-NEXT: PseudoRET
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    PseudoRET
+...

>From 300aba3860e5100930ea6dbf45f9ce8d3abeec36 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 14:35:10 -0800
Subject: [PATCH 4/7] regbankselect for return values

---
 llvm/include/llvm/CodeGen/RegisterBankInfo.h  |    4 +-
 llvm/lib/CodeGen/MachineVerifier.cpp          |    3 -
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp |   16 +-
 .../AArch64/GISel/AArch64RegisterBankInfo.h   |    2 +-
 .../GlobalISel/regbankselect/vec-args-ret.mir | 1259 +++++++++++++++++
 .../GlobalISel/regbankselect/vec-args.mir     | 1058 --------------
 6 files changed, 1272 insertions(+), 1070 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir

diff --git a/llvm/include/llvm/CodeGen/RegisterBankInfo.h b/llvm/include/llvm/CodeGen/RegisterBankInfo.h
index b353ea8b3cc86ec..459a31d962ef306 100644
--- a/llvm/include/llvm/CodeGen/RegisterBankInfo.h
+++ b/llvm/include/llvm/CodeGen/RegisterBankInfo.h
@@ -631,7 +631,7 @@ class RegisterBankInfo {
   ///
   /// \note Since this is a copy, both registers have the same size.
   virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
-                            unsigned Size) const {
+                            TypeSize Size) const {
     // Optimistically assume that copies are coalesced. I.e., when
     // they are on the same bank, they are free.
     // Otherwise assume a non-zero cost of 1. The targets are supposed
@@ -641,7 +641,7 @@ class RegisterBankInfo {
 
   /// \returns true if emitting a copy from \p Src to \p Dst is impossible.
   bool cannotCopy(const RegisterBank &Dst, const RegisterBank &Src,
-                  unsigned Size) const {
+                  TypeSize Size) const {
     return copyCost(Dst, Src, Size) == std::numeric_limits<unsigned>::max();
   }
 
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 7d8d47620089f43..6f2985399eaaa9f 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1966,9 +1966,6 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
         !DstSize.isScalable())
       break;
-    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
-        !DstSize.isScalable())
-      break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 21f9f6437e4fe91..4ca5b3674461d89 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -216,7 +216,7 @@ AArch64RegisterBankInfo::AArch64RegisterBankInfo(
 
 unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
                                            const RegisterBank &B,
-                                           unsigned Size) const {
+                                           TypeSize Size) const {
   // What do we do with different size?
   // copy are same size.
   // Will introduce other hooks for different size:
@@ -340,12 +340,16 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
         /*NumOperands*/ 2);
     const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
         /*ID*/ 3,
-        /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
+        /*Cost*/
+        copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
+                 TypeSize::Fixed(Size)),
         getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
         /*NumOperands*/ 2);
     const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
         /*ID*/ 3,
-        /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
+        /*Cost*/
+        copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
+                 TypeSize::Fixed(Size)),
         getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
         /*NumOperands*/ 2);
 
@@ -709,7 +713,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       assert(DstRB && SrcRB && "Both RegBank were nullptr");
       unsigned Size = getSizeInBits(DstReg, MRI, TRI);
       return getInstructionMapping(
-          DefaultMappingID, copyCost(*DstRB, *SrcRB, Size),
+          DefaultMappingID, copyCost(*DstRB, *SrcRB, TypeSize::Fixed(Size)),
           getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
           // We only care about the mapping of the destination.
           /*NumOperands*/ 1);
@@ -728,7 +732,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     const RegisterBank &SrcRB =
         SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
     return getInstructionMapping(
-        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
+        DefaultMappingID, copyCost(DstRB, SrcRB, TypeSize::Fixed(Size)),
         getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
         // We only care about the mapping of the destination for COPY.
         /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
@@ -821,7 +825,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       Cost = copyCost(
           *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
           *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
-          OpSize[0]);
+          TypeSize::Fixed(OpSize[0]));
     break;
   case TargetOpcode::G_LOAD: {
     // Loading in vector unit is slightly more expensive.
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
index 4d40efb5ac92485..bb0dbe71029e892 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
@@ -140,7 +140,7 @@ class AArch64RegisterBankInfo final : public AArch64GenRegisterBankInfo {
   AArch64RegisterBankInfo(const TargetRegisterInfo &TRI);
 
   unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
-                    unsigned Size) const override;
+                    TypeSize Size) const override;
 
   const RegisterBank &getRegBankFromRegClass(const TargetRegisterClass &RC,
                                              LLT) const override;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
new file mode 100644
index 000000000000000..3a4808d1a133835
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
@@ -0,0 +1,1259 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32  -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefixes=RV32 %s
+# RUN: llc -mtriple=riscv64  -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefixes=RV64 %s
+
+...
+---
+name:            test_ret_nxv1i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv16i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv32i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv64i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv64i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv64i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv16i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv32i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv8i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv16i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv4i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv8i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv64i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv64i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv64i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 64 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 64 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv32i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 32 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 32 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv16i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 16 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 16 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 8 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv1i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv1f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv8f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv16f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv4f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv8f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv16f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv32f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv16b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv32b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir
deleted file mode 100644
index 340db97f3fe01c7..000000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args.mir
+++ /dev/null
@@ -1,1058 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32  -run-pass=regbankselect \
-# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefixes=RV32 %s
-# RUN: llc -mtriple=riscv64  -run-pass=regbankselect \
-# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefixes=RV64 %s
-
----
-name:            test_args_nxv1i8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1i8
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1i8
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2i8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv2i8
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2i8
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv4i8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv4i8
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4i8
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv8i8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv8i8
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8i8
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv16i8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv16i8
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv16i8
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 16 x s8>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv32i8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv32i8
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv32i8
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 32 x s8>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv64i8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv64i8
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv64i8
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 64 x s8>) = COPY $v8m8
-    PseudoRET
-...
----
-name:            test_args_nxv1i16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1i16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1i16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2i16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv2i16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2i16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv4i16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv4i16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4i16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv8i16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv8i16
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8i16
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s16>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv16i16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv16i16
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv16i16
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 16 x s16>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv32i16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv32i16
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv32i16
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 32 x s16>) = COPY $v8m8
-    PseudoRET
-...
----
-name:            test_args_nxv1i32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1i32
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1i32
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2i32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv2i32
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2i32
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv4i32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv4i32
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4i32
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s32>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv8i32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv8i32
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8i32
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s32>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv16i32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv16i32
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv16i32
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 16 x s32>) = COPY $v8m8
-    PseudoRET
-...
----
-name:            test_args_nxv1i64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1i64
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1i64
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2i64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv2i64
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2i64
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s64>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv4i64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv4i64
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4i64
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s64>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv8i64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv8i64
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8i64
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s64>) = COPY $v8m8
-    PseudoRET
-...
----
-name:            test_args_nxv64i1
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv64i1
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv64i1
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 64 x s1>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv32i1
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv32i1
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv32i1
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 32 x s1>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv16i1
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv16i1
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv16i1
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 16 x s1>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv8i1
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv8i1
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8i1
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s1>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv4i1
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv4i1
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4i1
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s1>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2i1
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv2i1
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2i1
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s1>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv1i1
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1i1
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1i1
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s1>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv1f32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1f32
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1f32
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2f32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv2f32
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2f32
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv4f32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv4f32
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4f32
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s32>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv8f32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv8f32
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8f32
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s32>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv16f32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv16f32
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv16f32
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 16 x s32>) = COPY $v8m8
-    PseudoRET
-...
----
-name:            test_args_nxv1f64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1f64
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1f64
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2f64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv2f64
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2f64
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s64>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv4f64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv4f64
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4f64
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s64>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv8f64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv8f64
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8f64
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s64>) = COPY $v8m8
-    PseudoRET
-...
----
-name:            test_args_nxv1f16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1f16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1f16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2f16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv2f16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2f16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv4f16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv4f16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4f16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv8f16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv8f16
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8f16
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s16>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv16f16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv16f16
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv16f16
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 16 x s16>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv32f16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv32f16
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv32f16
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 32 x s16>) = COPY $v8m8
-    PseudoRET
-...
----
-name:            test_args_nxv1b16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv1b16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv1b16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv2b16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv2b16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv2b16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv4b16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; RV32-LABEL: name: test_args_nxv4b16
-    ; RV32: liveins: $v8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv4b16
-    ; RV64: liveins: $v8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    PseudoRET
-...
----
-name:            test_args_nxv8b16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m2
-    ; RV32-LABEL: name: test_args_nxv8b16
-    ; RV32: liveins: $v8m2
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv8b16
-    ; RV64: liveins: $v8m2
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm2nov0b(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 8 x s16>) = COPY $v8m2
-    PseudoRET
-...
----
-name:            test_args_nxv16b16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m4
-    ; RV32-LABEL: name: test_args_nxv16b16
-    ; RV32: liveins: $v8m4
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv16b16
-    ; RV64: liveins: $v8m4
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm4nov0b(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 16 x s16>) = COPY $v8m4
-    PseudoRET
-...
----
-name:            test_args_nxv32b16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8m8
-    ; RV32-LABEL: name: test_args_nxv32b16
-    ; RV32: liveins: $v8m8
-    ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV32-NEXT: PseudoRET
-    ;
-    ; RV64-LABEL: name: test_args_nxv32b16
-    ; RV64: liveins: $v8m8
-    ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrm8nov0b(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV64-NEXT: PseudoRET
-    %0:_(<vscale x 32 x s16>) = COPY $v8m8
-    PseudoRET
-...

>From 0b077025897ce3e2523969f7405f7c575e86a3d1 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 8 Nov 2023 10:11:43 -0800
Subject: [PATCH 5/7] use isScalableVector helper

---
 llvm/lib/CodeGen/MachineVerifier.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 6f2985399eaaa9f..209c418f64b8c7f 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -2262,7 +2262,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
           }
 
           // Make sure the register fits into its register bank if any.
-          if (RegBank && Ty.isValid() && !(Ty.isVector() && Ty.isScalable()) &&
+          if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
               RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
             report("Register bank is too small for virtual register", MO,
                    MONum);

>From ac9e36f2669429f05e8dbd5ff33a31e8dc691f90 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 8 Nov 2023 11:34:57 -0800
Subject: [PATCH 6/7] fix build errors for AMDGPU

---
 llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 4 ++--
 llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 9703c71ac2a4217..becf9a7faae5ba7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -229,7 +229,7 @@ bool AMDGPURegisterBankInfo::isDivergentRegBank(const RegisterBank *RB) const {
 
 unsigned AMDGPURegisterBankInfo::copyCost(const RegisterBank &Dst,
                                           const RegisterBank &Src,
-                                          unsigned Size) const {
+                                          TypeSize Size) const {
   // TODO: Should there be a UniformVGPRRegBank which can use readfirstlane?
   if (Dst.getID() == AMDGPU::SGPRRegBankID &&
       (isVectorRegisterBank(Src) || Src.getID() == AMDGPU::VCCRegBankID)) {
@@ -3542,7 +3542,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
 
     unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
     if (MI.getOpcode() != AMDGPU::G_FREEZE &&
-        cannotCopy(*DstBank, *SrcBank, Size))
+        cannotCopy(*DstBank, *SrcBank, TypeSize::Fixed(Size)))
       return getInvalidInstructionMapping();
 
     const ValueMapping &ValMap = getValueMapping(0, Size, *DstBank);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index 06bf3c7275471aa..b5d16e70ab23a20 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -165,7 +165,7 @@ class AMDGPURegisterBankInfo final : public AMDGPUGenRegisterBankInfo {
   bool isDivergentRegBank(const RegisterBank *RB) const override;
 
   unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
-                    unsigned Size) const override;
+                    TypeSize Size) const override;
 
   unsigned getBreakDownCost(const ValueMapping &ValMapping,
                             const RegisterBank *CurBank = nullptr) const override;

>From ac738e66a1ec595ae04bd2c17b22bdd174e93471 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 9 Nov 2023 14:26:22 -0800
Subject: [PATCH 7/7] single vector regbank

---
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |  11 +-
 .../Target/RISCV/GISel/RISCVRegisterBanks.td  |  12 +-
 .../GlobalISel/regbankselect/vec-args-ret.mir | 200 +++++++++---------
 3 files changed, 103 insertions(+), 120 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 75d92b005eb9c3d..31c0836f617df8e 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -101,25 +101,16 @@ RISCVRegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
   case RISCV::FPR32CRegClassID:
     return getRegBank(RISCV::FPRRegBankID);
   case RISCV::VRRegClassID:
-    return getRegBank(RISCV::VRRegBankID);
   case RISCV::VRNoV0RegClassID:
-    return getRegBank(RISCV::VRNoV0RegBankID);
   case RISCV::VRM2RegClassID:
-    return getRegBank(RISCV::VRM2RegBankID);
   case RISCV::VRM2NoV0RegClassID:
-    return getRegBank(RISCV::VRM2NoV0RegBankID);
   case RISCV::VRM4RegClassID:
-    return getRegBank(RISCV::VRM4RegBankID);
   case RISCV::VRM4NoV0RegClassID:
-    return getRegBank(RISCV::VRM4NoV0RegBankID);
   case RISCV::VRM8RegClassID:
-    return getRegBank(RISCV::VRM8RegBankID);
   case RISCV::VRM8NoV0RegClassID:
-    return getRegBank(RISCV::VRM8NoV0RegBankID);
   case RISCV::VMRegClassID:
-    return getRegBank(RISCV::VMRegBankID);
   case RISCV::VMV0RegClassID:
-    return getRegBank(RISCV::VMV0RegBankID);
+    return getRegBank(RISCV::VRRegBankID);
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
index 40ef98d2badde1d..9ca43b0134fc324 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
@@ -15,14 +15,6 @@ def GPRRegBank : RegisterBank<"GPRB", [GPR]>;
 /// Floating Point Registers: F.
 def FPRRegBank : RegisterBank<"FPRB", [FPR64]>;
 
-/// Vector Register Banks:
+/// Vector Registers: V.
 def VRRegBank : RegisterBank<"VRB", [VR]>;
-def VRNoV0RegBank : RegisterBank<"VRNoV0B", [VRNoV0]>;
-def VRM2RegBank : RegisterBank<"VRM2B", [VRM2]>;
-def VRM2NoV0RegBank : RegisterBank<"VRM2NoV0B", [VRM2NoV0]>;
-def VRM4RegBank : RegisterBank<"VRM4B", [VRM4]>;
-def VRM4NoV0RegBank : RegisterBank<"VRM4NoV0B", [VRM4NoV0]>;
-def VRM8RegBank : RegisterBank<"VRM8B", [VRM8]>;
-def VRM8NoV0RegBank : RegisterBank<"VRM8NoV0B", [VRM8NoV0]>;
-def VMRegBank : RegisterBank<"VMB", [VM]>;
-def VMV0RegBank : RegisterBank<"VMNoV0B", [VMV0]>;
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
index 3a4808d1a133835..e20d5f7fbb74fb7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
@@ -17,14 +17,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
@@ -42,14 +42,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
@@ -67,14 +67,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
@@ -92,14 +92,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv8i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
@@ -117,14 +117,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv16i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 16 x s8>) = COPY $v8
@@ -142,14 +142,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv32i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 32 x s8>) = COPY $v8
@@ -167,14 +167,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv64i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv64i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 64 x s8>) = COPY $v8
@@ -192,14 +192,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
@@ -217,14 +217,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
@@ -242,14 +242,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
@@ -267,14 +267,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv8i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8
@@ -292,14 +292,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv16i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8
@@ -317,14 +317,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv32i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8
@@ -342,14 +342,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
@@ -367,14 +367,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
@@ -392,14 +392,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv4i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = COPY $v8
@@ -417,14 +417,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv8i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = COPY $v8
@@ -442,14 +442,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv16i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = COPY $v8
@@ -467,14 +467,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
@@ -492,14 +492,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv2i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s64>) = COPY $v8
@@ -517,14 +517,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv4i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s64>) = COPY $v8
@@ -542,14 +542,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv8i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s64>) = COPY $v8
@@ -567,14 +567,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv64i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv64i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 64 x s1>) = COPY $v8
@@ -592,14 +592,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv32i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 32 x s1>) = COPY $v8
@@ -617,14 +617,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv16i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 16 x s1>) = COPY $v8
@@ -642,14 +642,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv8i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s1>) = COPY $v8
@@ -667,14 +667,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s1>) = COPY $v8
@@ -692,14 +692,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s1>) = COPY $v8
@@ -717,14 +717,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s1>) = COPY $v8
@@ -742,14 +742,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
@@ -767,14 +767,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
@@ -792,14 +792,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv4f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = COPY $v8
@@ -817,14 +817,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv8f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = COPY $v8
@@ -842,14 +842,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv16f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = COPY $v8
@@ -867,14 +867,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
@@ -892,14 +892,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv2f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s64>) = COPY $v8
@@ -917,14 +917,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv4f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s64>) = COPY $v8
@@ -942,14 +942,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv8f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s64>) = COPY $v8
@@ -967,14 +967,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
@@ -992,14 +992,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
@@ -1017,14 +1017,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
@@ -1042,14 +1042,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv8f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8
@@ -1067,14 +1067,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv16f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8
@@ -1092,14 +1092,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv32f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8
@@ -1117,14 +1117,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
@@ -1142,14 +1142,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
@@ -1167,14 +1167,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
@@ -1192,14 +1192,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv8b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8
@@ -1217,14 +1217,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv16b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8
@@ -1242,14 +1242,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv32b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8



More information about the llvm-commits mailing list