[llvm] [RISCV][GISEL] Add vector RegisterBanks and vector support in getRegBankFromRegClass (PR #71541)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 14 13:38:38 PST 2023


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/71541

From a1a77bb149c67aa6bbb6755a2d5e5b2f1ab96968 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 06:25:02 -0800
Subject: [PATCH 1/6] [RISCV] Use TypeSize in places where needed for
 RegBankSelection

This is a precommit for #71514 to use TypeSize instead of unsigned to
avoid crashes when scalable vectors are used.
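
For context, a minimal sketch (not part of the patch) of the distinction
this change relies on: llvm::TypeSize carries a scalable flag that a plain
unsigned drops, and narrowing a scalable size to unsigned is exactly what
used to crash. The TypeSize::Fixed/TypeSize::Scalable spellings below match
the ones used in this patch.

  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  void sketch() {
    TypeSize FixedSz = TypeSize::Fixed(64);       // exactly 64 bits
    TypeSize ScalableSz = TypeSize::Scalable(64); // vscale x 64 bits

    // A TypeSize remembers whether it is scalable; an unsigned cannot.
    bool IsScalable = ScalableSz.isScalable();        // true
    uint64_t MinBits = ScalableSz.getKnownMinValue(); // 64

    // Implicitly converting a scalable TypeSize to a plain integer
    // asserts at runtime -- the crash mode this precommit avoids by
    // keeping TypeSize through the RegBankSelect-related interfaces:
    // unsigned Bad = ScalableSz; // would trip an assertion
    (void)IsScalable;
    (void)MinBits;
  }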
---
 llvm/include/llvm/CodeGen/RegisterBankInfo.h     |  8 ++++----
 llvm/lib/CodeGen/MachineVerifier.cpp             |  2 +-
 llvm/lib/CodeGen/RegisterBankInfo.cpp            |  9 +++++----
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp    | 16 ++++++++++------
 .../AArch64/GISel/AArch64RegisterBankInfo.h      |  2 +-
 .../lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp |  4 ++--
 llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h  |  2 +-
 7 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/RegisterBankInfo.h b/llvm/include/llvm/CodeGen/RegisterBankInfo.h
index 1ee1f6b6c32ed63..459a31d962ef306 100644
--- a/llvm/include/llvm/CodeGen/RegisterBankInfo.h
+++ b/llvm/include/llvm/CodeGen/RegisterBankInfo.h
@@ -177,7 +177,7 @@ class RegisterBankInfo {
     /// \note This method does not check anything when assertions are disabled.
     ///
     /// \return True is the check was successful.
-    bool verify(const RegisterBankInfo &RBI, unsigned MeaningfulBitWidth) const;
+    bool verify(const RegisterBankInfo &RBI, TypeSize MeaningfulBitWidth) const;
 
     /// Print this on dbgs() stream.
     void dump() const;
@@ -631,7 +631,7 @@ class RegisterBankInfo {
   ///
   /// \note Since this is a copy, both registers have the same size.
   virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
-                            unsigned Size) const {
+                            TypeSize Size) const {
     // Optimistically assume that copies are coalesced. I.e., when
     // they are on the same bank, they are free.
     // Otherwise assume a non-zero cost of 1. The targets are supposed
@@ -641,7 +641,7 @@ class RegisterBankInfo {
 
   /// \returns true if emitting a copy from \p Src to \p Dst is impossible.
   bool cannotCopy(const RegisterBank &Dst, const RegisterBank &Src,
-                  unsigned Size) const {
+                  TypeSize Size) const {
     return copyCost(Dst, Src, Size) == std::numeric_limits<unsigned>::max();
   }
 
@@ -749,7 +749,7 @@ class RegisterBankInfo {
   /// virtual register.
   ///
   /// \pre \p Reg != 0 (NoRegister).
-  unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
+  TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
                          const TargetRegisterInfo &TRI) const;
 
   /// Check that information hold by this instance make sense for the
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 6107fa5c43c57f9..729dfc67491ee14 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -2262,7 +2262,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
           }
 
           // Make sure the register fits into its register bank if any.
-          if (RegBank && Ty.isValid() &&
+          if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
               RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
             report("Register bank is too small for virtual register", MO,
                    MONum);
diff --git a/llvm/lib/CodeGen/RegisterBankInfo.cpp b/llvm/lib/CodeGen/RegisterBankInfo.cpp
index f9721d7d9386958..6a96bb40f56aed9 100644
--- a/llvm/lib/CodeGen/RegisterBankInfo.cpp
+++ b/llvm/lib/CodeGen/RegisterBankInfo.cpp
@@ -495,7 +495,7 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
   }
 }
 
-unsigned RegisterBankInfo::getSizeInBits(Register Reg,
+TypeSize RegisterBankInfo::getSizeInBits(Register Reg,
                                          const MachineRegisterInfo &MRI,
                                          const TargetRegisterInfo &TRI) const {
   if (Reg.isPhysical()) {
@@ -553,7 +553,7 @@ bool RegisterBankInfo::ValueMapping::partsAllUniform() const {
 }
 
 bool RegisterBankInfo::ValueMapping::verify(const RegisterBankInfo &RBI,
-                                            unsigned MeaningfulBitWidth) const {
+                                            TypeSize MeaningfulBitWidth) const {
   assert(NumBreakDowns && "Value mapped nowhere?!");
   unsigned OrigValueBitWidth = 0;
   for (const RegisterBankInfo::PartialMapping &PartMap : *this) {
@@ -565,8 +565,9 @@ bool RegisterBankInfo::ValueMapping::verify(const RegisterBankInfo &RBI,
     OrigValueBitWidth =
         std::max(OrigValueBitWidth, PartMap.getHighBitIdx() + 1);
   }
-  assert(OrigValueBitWidth >= MeaningfulBitWidth &&
-         "Meaningful bits not covered by the mapping");
+  assert((MeaningfulBitWidth.isScalable() ||
+          OrigValueBitWidth >= MeaningfulBitWidth) &&
+         "Meaningful bits not covered by the mapping");
   APInt ValueMask(OrigValueBitWidth, 0);
   for (const RegisterBankInfo::PartialMapping &PartMap : *this) {
     // Check that the union of the partial mappings covers the whole value,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 21f9f6437e4fe91..4ca5b3674461d89 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -216,7 +216,7 @@ AArch64RegisterBankInfo::AArch64RegisterBankInfo(
 
 unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
                                            const RegisterBank &B,
-                                           unsigned Size) const {
+                                           TypeSize Size) const {
   // What do we do with different size?
   // copy are same size.
   // Will introduce other hooks for different size:
@@ -340,12 +340,16 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
         /*NumOperands*/ 2);
     const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
         /*ID*/ 3,
-        /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
+        /*Cost*/
+        copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
+                 TypeSize::Fixed(Size)),
         getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
         /*NumOperands*/ 2);
     const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
         /*ID*/ 3,
-        /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
+        /*Cost*/
+        copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
+                 TypeSize::Fixed(Size)),
         getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
         /*NumOperands*/ 2);
 
@@ -709,7 +713,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       assert(DstRB && SrcRB && "Both RegBank were nullptr");
       unsigned Size = getSizeInBits(DstReg, MRI, TRI);
       return getInstructionMapping(
-          DefaultMappingID, copyCost(*DstRB, *SrcRB, Size),
+          DefaultMappingID, copyCost(*DstRB, *SrcRB, TypeSize::Fixed(Size)),
           getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
           // We only care about the mapping of the destination.
           /*NumOperands*/ 1);
@@ -728,7 +732,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     const RegisterBank &SrcRB =
         SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
     return getInstructionMapping(
-        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
+        DefaultMappingID, copyCost(DstRB, SrcRB, TypeSize::Fixed(Size)),
         getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
         // We only care about the mapping of the destination for COPY.
         /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
@@ -821,7 +825,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       Cost = copyCost(
           *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
           *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
-          OpSize[0]);
+          TypeSize::Fixed(OpSize[0]));
     break;
   case TargetOpcode::G_LOAD: {
     // Loading in vector unit is slightly more expensive.
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
index 48bd9fbeadb41bc..b6364c6a64099a4 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
@@ -140,7 +140,7 @@ class AArch64RegisterBankInfo final : public AArch64GenRegisterBankInfo {
   AArch64RegisterBankInfo(const TargetRegisterInfo &TRI);
 
   unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
-                    unsigned Size) const override;
+                    TypeSize Size) const override;
 
   const RegisterBank &getRegBankFromRegClass(const TargetRegisterClass &RC,
                                              LLT) const override;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 4ee58c9ef0cb9b8..49322109bdb74f0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -229,7 +229,7 @@ bool AMDGPURegisterBankInfo::isDivergentRegBank(const RegisterBank *RB) const {
 
 unsigned AMDGPURegisterBankInfo::copyCost(const RegisterBank &Dst,
                                           const RegisterBank &Src,
-                                          unsigned Size) const {
+                                          TypeSize Size) const {
   // TODO: Should there be a UniformVGPRRegBank which can use readfirstlane?
   if (Dst.getID() == AMDGPU::SGPRRegBankID &&
       (isVectorRegisterBank(Src) || Src.getID() == AMDGPU::VCCRegBankID)) {
@@ -3542,7 +3542,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
 
     unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
     if (MI.getOpcode() != AMDGPU::G_FREEZE &&
-        cannotCopy(*DstBank, *SrcBank, Size))
+        cannotCopy(*DstBank, *SrcBank, TypeSize::Fixed(Size)))
       return getInvalidInstructionMapping();
 
     const ValueMapping &ValMap = getValueMapping(0, Size, *DstBank);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index 06bf3c7275471aa..b5d16e70ab23a20 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -165,7 +165,7 @@ class AMDGPURegisterBankInfo final : public AMDGPUGenRegisterBankInfo {
   bool isDivergentRegBank(const RegisterBank *RB) const override;
 
   unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
-                    unsigned Size) const override;
+                    TypeSize Size) const override;
 
   unsigned getBreakDownCost(const ValueMapping &ValMapping,
                             const RegisterBank *CurBank = nullptr) const override;

From 16a10c3547c34983f6634be0d121b8437dd6310a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 07:29:35 -0800
Subject: [PATCH 2/6] [RISCV][GISEL] Add support for scalable vector types in
 lowerReturnVal

Scalable vector return values from LLVM IR are lowered into physical vector
registers in MIR according to the calling convention.

This patch is stacked on #70881.
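
As a rough illustration (simplified, not the literal code in the diff
below), the return-type gate added to RISCVCallLowering.cpp amounts to
this predicate:

  // Sketch of the gating logic in isSupportedReturnType:
  static bool isSupportedScalableRet(Type *T, const RISCVSubtarget &ST) {
    return T->isVectorTy() && T->isScalableTy() && ST.hasVInstructions() &&
           isLegalElementTypeForRVV(T->getScalarType(), ST);
  }

The register grouping seen in the new tests then follows from the known
minimum size of the type, assuming RISCV::RVVBitsPerBlock (64) bits per
LMUL=1 register:

  <vscale x 8 x s8>   ->  64 known-min bits -> LMUL 1 -> $v8
  <vscale x 16 x s8>  -> 128 known-min bits -> LMUL 2 -> $v8m2
  <vscale x 16 x s32> -> 512 known-min bits -> LMUL 8 -> $v8m8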
---
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |   2 +-
 llvm/lib/CodeGen/MachineVerifier.cpp          |   3 +
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  |   4 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   7 +-
 .../irtranslator/vec-ret-bf16-err.ll          |  14 +
 .../irtranslator/vec-ret-f16-err.ll           |  14 +
 .../RISCV/GlobalISel/irtranslator/vec-ret.ll  | 809 ++++++++++++++++++
 7 files changed, 850 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll

diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 178f1fc9313d71b..d7ea25838058e4b 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -359,7 +359,7 @@ bool IRTranslator::translateCompare(const User &U,
 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
   const ReturnInst &RI = cast<ReturnInst>(U);
   const Value *Ret = RI.getReturnValue();
-  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
+  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
     Ret = nullptr;
 
   ArrayRef<Register> VRegs;
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 729dfc67491ee14..417d9b359591153 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1966,6 +1966,9 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
         !DstSize.isScalable())
       break;
+    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
+        !DstSize.isScalable())
+      break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index e73d8863963d0b2..6f387d0a5022c27 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -368,6 +368,10 @@ static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget) {
     return true;
   }
 
+  if (T->isVectorTy() && Subtarget.hasVInstructions() && T->isScalableTy() &&
+      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
+    return true;
+
   return false;
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a4cd8327f45f82a..55ef4bd5a4e4bdb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19638,12 +19638,15 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
 }
 
 bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
-  // We don't support scalable vectors in GISel.
+  // At the moment, the only scalable-vector instruction GISel knows how to
+  // lower is a ret with a scalable vector argument.
+
   if (Inst.getType()->isScalableTy())
     return true;
 
   for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
-    if (Inst.getOperand(i)->getType()->isScalableTy())
+    if (Inst.getOperand(i)->getType()->isScalableTy() &&
+        !isa<ReturnInst>(&Inst))
       return true;
 
   if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
new file mode 100644
index 000000000000000..c968d0726317f19
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
@@ -0,0 +1,14 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for bf16 vectors. If the compiler did not throw an error,
+; it would try to scalarize the return value to an s32, which may drop elements.
+define <vscale x 1 x bfloat> @test_ret_nxv1bf16() {
+entry:
+  ret <vscale x 1 x bfloat> undef
+}
+
+; CHECK: LLVM ERROR: unable to translate instruction: ret (in function: test_ret_nxv1bf16)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
new file mode 100644
index 000000000000000..f87ca94ceb4f103
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
@@ -0,0 +1,14 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for f16 vectors. If the compiler did not throw an error,
+; it would try to scalarize the return value to an s32, which may drop elements.
+define <vscale x 1 x half> @test_ret_nxv1f16() {
+entry:
+  ret <vscale x 1 x half> undef
+}
+
+; CHECK: LLVM ERROR: unable to translate instruction: ret (in function: test_ret_nxv1f16)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
new file mode 100644
index 000000000000000..eec9969063c87a5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
@@ -0,0 +1,809 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+; ==========================================================================
+; ============================= Scalable Types =============================
+; ==========================================================================
+
+define <vscale x 1 x i8> @test_ret_nxv1i8() {
+  ; RV32-LABEL: name: test_ret_nxv1i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i8> undef
+}
+
+define <vscale x 2 x i8> @test_ret_nxv2i8() {
+  ; RV32-LABEL: name: test_ret_nxv2i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i8> undef
+}
+
+define <vscale x 4 x i8> @test_ret_nxv4i8() {
+  ; RV32-LABEL: name: test_ret_nxv4i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i8> undef
+}
+
+define <vscale x 8 x i8> @test_ret_nxv8i8() {
+  ; RV32-LABEL: name: test_ret_nxv8i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 8 x i8> undef
+}
+
+define <vscale x 16 x i8> @test_ret_nxv16i8() {
+  ; RV32-LABEL: name: test_ret_nxv16i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 16 x i8> undef
+}
+
+define <vscale x 32 x i8> @test_ret_nxv32i8() {
+  ; RV32-LABEL: name: test_ret_nxv32i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 32 x i8> undef
+}
+
+define <vscale x 64 x i8> @test_ret_nxv64i8() {
+  ; RV32-LABEL: name: test_ret_nxv64i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv64i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 64 x i8> undef
+}
+
+define <vscale x 1 x i16> @test_ret_nxv1i16() {
+  ; RV32-LABEL: name: test_ret_nxv1i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i16> undef
+}
+
+define <vscale x 2 x i16> @test_ret_nxv2i16() {
+  ; RV32-LABEL: name: test_ret_nxv2i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i16> undef
+}
+
+define <vscale x 4 x i16> @test_ret_nxv4i16() {
+  ; RV32-LABEL: name: test_ret_nxv4i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i16> undef
+}
+
+define <vscale x 8 x i16> @test_ret_nxv8i16() {
+  ; RV32-LABEL: name: test_ret_nxv8i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 8 x i16> undef
+}
+
+define <vscale x 16 x i16> @test_ret_nxv16i16() {
+  ; RV32-LABEL: name: test_ret_nxv16i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 16 x i16> undef
+}
+
+define <vscale x 32 x i16> @test_ret_nxv32i16() {
+  ; RV32-LABEL: name: test_ret_nxv32i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 32 x i16> undef
+}
+
+define <vscale x 1 x i32> @test_ret_nxv1i32() {
+  ; RV32-LABEL: name: test_ret_nxv1i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i32> undef
+}
+
+define <vscale x 2 x i32> @test_ret_nxv2i32() {
+  ; RV32-LABEL: name: test_ret_nxv2i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i32> undef
+}
+
+define <vscale x 4 x i32> @test_ret_nxv4i32() {
+  ; RV32-LABEL: name: test_ret_nxv4i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 4 x i32> undef
+}
+
+define <vscale x 8 x i32> @test_ret_nxv8i32() {
+  ; RV32-LABEL: name: test_ret_nxv8i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 8 x i32> undef
+}
+
+define <vscale x 16 x i32> @test_ret_nxv16i32() {
+  ; RV32-LABEL: name: test_ret_nxv16i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 16 x i32> undef
+}
+
+define <vscale x 1 x i64> @test_ret_nxv1i64() {
+  ; RV32-LABEL: name: test_ret_nxv1i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i64> undef
+}
+
+define <vscale x 2 x i64> @test_ret_nxv2i64() {
+  ; RV32-LABEL: name: test_ret_nxv2i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 2 x i64> undef
+}
+
+define <vscale x 4 x i64> @test_ret_nxv4i64() {
+  ; RV32-LABEL: name: test_ret_nxv4i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 4 x i64> undef
+}
+
+define <vscale x 8 x i64> @test_ret_nxv8i64() {
+  ; RV32-LABEL: name: test_ret_nxv8i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 8 x i64> undef
+}
+
+define <vscale x 64 x i1> @test_ret_nxv64i1() {
+  ; RV32-LABEL: name: test_ret_nxv64i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv64i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 64 x i1> undef
+}
+
+define <vscale x 32 x i1> @test_ret_nxv32i1() {
+  ; RV32-LABEL: name: test_ret_nxv32i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 32 x i1> undef
+}
+
+define <vscale x 16 x i1> @test_ret_nxv16i1() {
+  ; RV32-LABEL: name: test_ret_nxv16i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 16 x i1> undef
+}
+
+define <vscale x 8 x i1> @test_ret_nxv8i1() {
+  ; RV32-LABEL: name: test_ret_nxv8i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 8 x i1> undef
+}
+
+define <vscale x 4 x i1> @test_ret_nxv4i1() {
+  ; RV32-LABEL: name: test_ret_nxv4i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i1> undef
+}
+
+define <vscale x 2 x i1> @test_ret_nxv2i1() {
+  ; RV32-LABEL: name: test_ret_nxv2i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i1> undef
+}
+
+define <vscale x 1 x i1> @test_ret_nxv1i1() {
+  ; RV32-LABEL: name: test_ret_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i1> undef
+}
+
+define <vscale x 1 x float> @test_ret_nxv1f32() {
+  ; RV32-LABEL: name: test_ret_nxv1f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x float> undef
+}
+
+define <vscale x 2 x float> @test_ret_nxv2f32() {
+  ; RV32-LABEL: name: test_ret_nxv2f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x float> undef
+}
+
+define <vscale x 4 x float> @test_ret_nxv4f32() {
+  ; RV32-LABEL: name: test_ret_nxv4f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 4 x float> undef
+}
+
+define <vscale x 8 x float> @test_ret_nxv8f32() {
+  ; RV32-LABEL: name: test_ret_nxv8f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 8 x float> undef
+}
+
+define <vscale x 16 x float> @test_ret_nxv16f32() {
+  ; RV32-LABEL: name: test_ret_nxv16f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 16 x float> undef
+}
+
+define <vscale x 1 x double> @test_ret_nxv1f64() {
+  ; RV32-LABEL: name: test_ret_nxv1f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x double> undef
+}
+
+define <vscale x 2 x double> @test_ret_nxv2f64() {
+  ; RV32-LABEL: name: test_ret_nxv2f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 2 x double> undef
+}
+
+define <vscale x 4 x double> @test_ret_nxv4f64() {
+  ; RV32-LABEL: name: test_ret_nxv4f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 4 x double> undef
+}
+
+define <vscale x 8 x double> @test_ret_nxv8f64() {
+  ; RV32-LABEL: name: test_ret_nxv8f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 8 x double> undef
+}
+
+define <vscale x 1 x half> @test_ret_nxv1f16() {
+  ; RV32-LABEL: name: test_ret_nxv1f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x half> undef
+}
+
+define <vscale x 2 x half> @test_ret_nxv2f16() {
+  ; RV32-LABEL: name: test_ret_nxv2f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x half> undef
+}
+
+define <vscale x 4 x half> @test_ret_nxv4f16() {
+  ; RV32-LABEL: name: test_ret_nxv4f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x half> undef
+}
+
+define <vscale x 8 x half> @test_ret_nxv8f16() {
+  ; RV32-LABEL: name: test_ret_nxv8f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 8 x half> undef
+}
+
+define <vscale x 16 x half> @test_ret_nxv16f16() {
+  ; RV32-LABEL: name: test_ret_nxv16f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 16 x half> undef
+}
+
+define <vscale x 32 x half> @test_ret_nxv32f16() {
+  ; RV32-LABEL: name: test_ret_nxv32f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 32 x half> undef
+}
+
+define <vscale x 1 x bfloat> @test_ret_nxv1b16() {
+  ; RV32-LABEL: name: test_ret_nxv1b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x bfloat> undef
+}
+
+define <vscale x 2 x bfloat> @test_ret_nxv2b16() {
+  ; RV32-LABEL: name: test_ret_nxv2b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x bfloat> undef
+}
+
+define <vscale x 4 x bfloat> @test_ret_nxv4b16() {
+  ; RV32-LABEL: name: test_ret_nxv4b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x bfloat> undef
+}
+
+define <vscale x 8 x bfloat> @test_ret_nxv8b16() {
+  ; RV32-LABEL: name: test_ret_nxv8b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 8 x bfloat> undef
+}
+
+define <vscale x 16 x bfloat> @test_ret_nxv16b16() {
+  ; RV32-LABEL: name: test_ret_nxv16b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 16 x bfloat> undef
+}
+
+define <vscale x 32 x bfloat> @test_ret_nxv32b16() {
+  ; RV32-LABEL: name: test_ret_nxv32b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 32 x bfloat> undef
+}

From a6c8de3ffff621d7acaa1077ab9c9aa6f245240a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 06:25:13 -0800
Subject: [PATCH 3/6] [RISCV][GISEL] Add vector RegisterBanks and vector
 support in getRegBankFromRegClass

Vector register banks are created for the various vector register
groupings. getRegBankFromRegClass is implemented to map a vector
TargetRegisterClass to the corresponding vector RegisterBank.
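
A hypothetical usage sketch (the helper below is illustrative only; the
bank IDs are the TableGen-generated enums produced from the .td additions
in this patch):

  #include "RISCVRegisterBankInfo.h"
  using namespace llvm;

  // RBI normally comes from the RISCVSubtarget; with the new cases, a
  // vector register class resolves to its matching vector bank instead
  // of falling through to the default case.
  static void checkVectorBanks(const RISCVRegisterBankInfo &RBI) {
    const RegisterBank &RB =
        RBI.getRegBankFromRegClass(RISCV::VRM2RegClass, /*Ty=*/LLT());
    assert(RB.getID() == RISCV::VRM2RegBankID && "VRM2 -> VRM2 bank");
  }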
---
 llvm/lib/CodeGen/MachineVerifier.cpp          |    3 -
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |   20 +
 .../Target/RISCV/GISel/RISCVRegisterBanks.td  |   12 +
 .../GlobalISel/regbankselect/vec-args-ret.mir | 1259 +++++++++++++++++
 4 files changed, 1291 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 417d9b359591153..729dfc67491ee14 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1966,9 +1966,6 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
         !DstSize.isScalable())
       break;
-    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
-        !DstSize.isScalable())
-      break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index cb1da8ff11c08cb..5a4017ec14241d9 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -100,6 +100,26 @@ RISCVRegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
   case RISCV::FPR64CRegClassID:
   case RISCV::FPR32CRegClassID:
     return getRegBank(RISCV::FPRBRegBankID);
+  case RISCV::VRRegClassID:
+    return getRegBank(RISCV::VRRegBankID);
+  case RISCV::VRNoV0RegClassID:
+    return getRegBank(RISCV::VRNoV0RegBankID);
+  case RISCV::VRM2RegClassID:
+    return getRegBank(RISCV::VRM2RegBankID);
+  case RISCV::VRM2NoV0RegClassID:
+    return getRegBank(RISCV::VRM2NoV0RegBankID);
+  case RISCV::VRM4RegClassID:
+    return getRegBank(RISCV::VRM4RegBankID);
+  case RISCV::VRM4NoV0RegClassID:
+    return getRegBank(RISCV::VRM4NoV0RegBankID);
+  case RISCV::VRM8RegClassID:
+    return getRegBank(RISCV::VRM8RegBankID);
+  case RISCV::VRM8NoV0RegClassID:
+    return getRegBank(RISCV::VRM8NoV0RegBankID);
+  case RISCV::VMRegClassID:
+    return getRegBank(RISCV::VMRegBankID);
+  case RISCV::VMV0RegClassID:
+    return getRegBank(RISCV::VMV0RegBankID);
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
index b0556ec44e8f3dd..d3baba421dfb251 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
@@ -14,3 +14,15 @@ def GPRBRegBank : RegisterBank<"GPRB", [GPR]>;
 
 /// Floating Point Registers: F.
 def FPRBRegBank : RegisterBank<"FPRB", [FPR64]>;
+
+/// Vector Register Banks:
+def VRRegBank : RegisterBank<"VRB", [VR]>;
+def VRNoV0RegBank : RegisterBank<"VRNoV0B", [VRNoV0]>;
+def VRM2RegBank : RegisterBank<"VRM2B", [VRM2]>;
+def VRM2NoV0RegBank : RegisterBank<"VRM2NoV0B", [VRM2NoV0]>;
+def VRM4RegBank : RegisterBank<"VRM4B", [VRM4]>;
+def VRM4NoV0RegBank : RegisterBank<"VRM4NoV0B", [VRM4NoV0]>;
+def VRM8RegBank : RegisterBank<"VRM8B", [VRM8]>;
+def VRM8NoV0RegBank : RegisterBank<"VRM8NoV0B", [VRM8NoV0]>;
+def VMRegBank : RegisterBank<"VMB", [VM]>;
+def VMV0RegBank : RegisterBank<"VMV0B", [VMV0]>;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
new file mode 100644
index 000000000000000..3a4808d1a133835
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
@@ -0,0 +1,1259 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32  -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefixes=RV32 %s
+# RUN: llc -mtriple=riscv64  -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefixes=RV64 %s
+
+...
+---
+name:            test_ret_nxv1i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv16i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv32i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv64i8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv64i8
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv64i8
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv16i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv32i16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv8i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv16i32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv4i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv8i64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv64i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv64i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv64i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 64 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 64 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv32i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 32 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 32 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv16i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 16 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 16 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 8 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv1i1
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1i1
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1i1
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s1>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s1>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv1f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv8f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv16f32
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16f32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16f32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv4f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv8f64
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8f64
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8f64
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv16f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv32f16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32f16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32f16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_ret_nxv1b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv1b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv1b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv2b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv2b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv2b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv4b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv4b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv4b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_ret_nxv8b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv8b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: test_ret_nxv8b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_ret_nxv16b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv16b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: test_ret_nxv16b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_ret_nxv32b16
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: test_ret_nxv32b16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{  $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: test_ret_nxv32b16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{  $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...

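The checks above come from the earlier patch in this series, where every scalable-vector COPY is annotated with the per-class vrnov0b bank. The next patch collapses the per-class vector banks into one, and the same test file is regenerated (presumably with llvm/utils/update_mir_test_checks.py) so that the only textual change in the CHECK lines is the bank name, vrnov0b becoming vrb.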
>From 84f05cb12204590edff8af9c54ddd3fa3cb613e8 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 9 Nov 2023 14:26:22 -0800
Subject: [PATCH 4/6] single vector regbank

---
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |  11 +-
 .../Target/RISCV/GISel/RISCVRegisterBanks.td  |  12 +-
 .../GlobalISel/regbankselect/vec-args-ret.mir | 200 +++++++++---------
 3 files changed, 103 insertions(+), 120 deletions(-)
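The subject line is terse, so to spell out the apparent idea: in GlobalISel a register bank only has to identify a copy domain, i.e. which register file a value lives in. Whether a vector value needs an LMUL-aligned register group (VRM2/VRM4/VRM8) or must avoid V0 is a register class constraint that instruction selection enforces later, so a single VRB bank is enough for all of RVV. That is what the two hunks below implement; a small standalone model of the collapsed switch follows the RISCVRegisterBankInfo.cpp hunk.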

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 5a4017ec14241d9..2eeaf23f5635859 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -101,25 +101,16 @@ RISCVRegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
   case RISCV::FPR32CRegClassID:
     return getRegBank(RISCV::FPRBRegBankID);
   case RISCV::VRRegClassID:
-    return getRegBank(RISCV::VRRegBankID);
   case RISCV::VRNoV0RegClassID:
-    return getRegBank(RISCV::VRNoV0RegBankID);
   case RISCV::VRM2RegClassID:
-    return getRegBank(RISCV::VRM2RegBankID);
   case RISCV::VRM2NoV0RegClassID:
-    return getRegBank(RISCV::VRM2NoV0RegBankID);
   case RISCV::VRM4RegClassID:
-    return getRegBank(RISCV::VRM4RegBankID);
   case RISCV::VRM4NoV0RegClassID:
-    return getRegBank(RISCV::VRM4NoV0RegBankID);
   case RISCV::VRM8RegClassID:
-    return getRegBank(RISCV::VRM8RegBankID);
   case RISCV::VRM8NoV0RegClassID:
-    return getRegBank(RISCV::VRM8NoV0RegBankID);
   case RISCV::VMRegClassID:
-    return getRegBank(RISCV::VMRegBankID);
   case RISCV::VMV0RegClassID:
-    return getRegBank(RISCV::VMV0RegBankID);
+    return getRegBank(RISCV::VRRegBankID);
   }
 }
 
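For readers skimming the hunk above: the ten per-class cases now fall through to a single return. A minimal standalone sketch of that shape (illustrative enum names only, not the generated LLVM/TableGen identifiers) is:

  #include <cassert>

  // Stand-ins for the RISCV register class / bank IDs; the real enums
  // live in TableGen-generated headers.
  enum RegClassID {
    VRID, VRNoV0ID, VRM2ID, VRM2NoV0ID, VRM4ID,
    VRM4NoV0ID, VRM8ID, VRM8NoV0ID, VMID, VMV0ID
  };
  enum RegBankID { VRRegBankID };

  // Every RVV register class, regardless of LMUL grouping or V0
  // avoidance, maps to the one vector bank; those distinctions remain
  // register class properties, honored later at instruction selection.
  RegBankID getVectorRegBank(RegClassID ID) {
    switch (ID) {
    case VRID:
    case VRNoV0ID:
    case VRM2ID:
    case VRM2NoV0ID:
    case VRM4ID:
    case VRM4NoV0ID:
    case VRM8ID:
    case VRM8NoV0ID:
    case VMID:
    case VMV0ID:
      return VRRegBankID;
    }
    assert(false && "not a vector register class");
    return VRRegBankID;
  }

  int main() {
    // An LMUL=4, no-V0 class lands in the same bank as a plain VR class.
    assert(getVectorRegBank(VRM4NoV0ID) == getVectorRegBank(VRID));
    return 0;
  }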
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
index d3baba421dfb251..c0d50faabbaa7ab 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
@@ -15,14 +15,6 @@ def GPRBRegBank : RegisterBank<"GPRB", [GPR]>;
 /// Floating Point Registers: F.
 def FPRBRegBank : RegisterBank<"FPRB", [FPR64]>;
 
-/// Vector Register Banks:
+/// Vector Registers: V.
 def VRRegBank : RegisterBank<"VRB", [VR]>;
-def VRNoV0RegBank : RegisterBank<"VRNoV0B", [VRNoV0]>;
-def VRM2RegBank : RegisterBank<"VRM2B", [VRM2]>;
-def VRM2NoV0RegBank : RegisterBank<"VRM2NoV0B", [VRM2NoV0]>;
-def VRM4RegBank : RegisterBank<"VRM4B", [VRM4]>;
-def VRM4NoV0RegBank : RegisterBank<"VRM4NoV0B", [VRM4NoV0]>;
-def VRM8RegBank : RegisterBank<"VRM8B", [VRM8]>;
-def VRM8NoV0RegBank : RegisterBank<"VRM8NoV0B", [VRM8NoV0]>;
-def VMRegBank : RegisterBank<"VMB", [VM]>;
-def VMV0RegBank : RegisterBank<"VMNoV0B", [VMV0]>;
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
index 3a4808d1a133835..e20d5f7fbb74fb7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-args-ret.mir
@@ -17,14 +17,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
@@ -42,14 +42,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
@@ -67,14 +67,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
@@ -92,14 +92,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv8i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
@@ -117,14 +117,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv16i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 16 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 16 x s8>) = COPY $v8
@@ -142,14 +142,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv32i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 32 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 32 x s8>) = COPY $v8
@@ -167,14 +167,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv64i8
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv64i8
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s8>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 64 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 64 x s8>) = COPY $v8
@@ -192,14 +192,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
@@ -217,14 +217,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
@@ -242,14 +242,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
@@ -267,14 +267,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv8i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8
@@ -292,14 +292,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv16i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8
@@ -317,14 +317,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32i16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv32i16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8
@@ -342,14 +342,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
@@ -367,14 +367,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
@@ -392,14 +392,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv4i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = COPY $v8
@@ -417,14 +417,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv8i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = COPY $v8
@@ -442,14 +442,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv16i32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = COPY $v8
@@ -467,14 +467,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
@@ -492,14 +492,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv2i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s64>) = COPY $v8
@@ -517,14 +517,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv4i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s64>) = COPY $v8
@@ -542,14 +542,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv8i64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s64>) = COPY $v8
@@ -567,14 +567,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv64i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv64i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 64 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 64 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 64 x s1>) = COPY $v8
@@ -592,14 +592,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv32i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 32 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 32 x s1>) = COPY $v8
@@ -617,14 +617,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv16i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 16 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 16 x s1>) = COPY $v8
@@ -642,14 +642,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv8i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 8 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s1>) = COPY $v8
@@ -667,14 +667,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s1>) = COPY $v8
@@ -692,14 +692,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s1>) = COPY $v8
@@ -717,14 +717,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1i1
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s1>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1i1
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s1>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s1>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s1>) = COPY $v8
@@ -742,14 +742,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
@@ -767,14 +767,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
@@ -792,14 +792,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv4f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 4 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = COPY $v8
@@ -817,14 +817,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv8f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 8 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = COPY $v8
@@ -842,14 +842,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16f32
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv16f32
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s32>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 16 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = COPY $v8
@@ -867,14 +867,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
@@ -892,14 +892,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv2f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 2 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s64>) = COPY $v8
@@ -917,14 +917,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv4f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 4 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s64>) = COPY $v8
@@ -942,14 +942,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8f64
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv8f64
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s64>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 8 x s64>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s64>) = COPY $v8
@@ -967,14 +967,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
@@ -992,14 +992,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
@@ -1017,14 +1017,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
@@ -1042,14 +1042,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv8f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8
@@ -1067,14 +1067,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv16f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8
@@ -1092,14 +1092,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32f16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv32f16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8
@@ -1117,14 +1117,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv1b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv1b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 1 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
@@ -1142,14 +1142,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv2b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv2b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 2 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
@@ -1167,14 +1167,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv4b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV32-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
     ;
     ; RV64-LABEL: name: test_ret_nxv4b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
     ; RV64-NEXT: $v8 = COPY [[COPY]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
@@ -1192,14 +1192,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv8b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m2
     ;
     ; RV64-LABEL: name: test_ret_nxv8b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 8 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m2 = COPY [[COPY]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8
@@ -1217,14 +1217,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv16b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m4
     ;
     ; RV64-LABEL: name: test_ret_nxv16b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 16 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m4 = COPY [[COPY]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8
@@ -1242,14 +1242,14 @@ body:             |
     ; RV32-LABEL: name: test_ret_nxv32b16
     ; RV32: liveins: $v8
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV32-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8m8
     ;
     ; RV64-LABEL: name: test_ret_nxv32b16
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrnov0b(<vscale x 32 x s16>) = COPY $v8
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8
     ; RV64-NEXT: $v8m8 = COPY [[COPY]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8

>From f7a4d6f602d10ce1badb49d016b3248b3e1616bd Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 13 Nov 2023 12:50:39 -0800
Subject: [PATCH 5/6] update vrbregbank to use vrm8

---
 llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 2 +-
 llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 2eeaf23f5635859..094a6ed55baebdb 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -110,7 +110,7 @@ RISCVRegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
   case RISCV::VRM8NoV0RegClassID:
   case RISCV::VMRegClassID:
   case RISCV::VMV0RegClassID:
-    return getRegBank(RISCV::VRRegBankID);
+    return getRegBank(RISCV::VRBRegBankID);
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
index c0d50faabbaa7ab..b1ef815fe373521 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBanks.td
@@ -15,6 +15,6 @@ def GPRBRegBank : RegisterBank<"GPRB", [GPR]>;
 /// Floating Point Registers: F.
 def FPRBRegBank : RegisterBank<"FPRB", [FPR64]>;
 
-/// Vector Regististers: V.
-def VRRegBank : RegisterBank<"VRB", [VR]>;
+/// Vector Registers: V.
+def VRBRegBank : RegisterBank<"VRB", [VRM8]>;
 

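A note on the bank definition above: TableGen derives a register bank's size
from the largest register class it covers, so listing VRM8 (the LMUL=8
register-group class) instead of VR gives VRB the width of eight vector
registers, which is presumably why this patch retargets the bank. Below is a
minimal standalone sketch of the arithmetic, assuming the V extension's
64-bit minimum VLEN; none of this is LLVM API, just illustrative numbers:

#include <cassert>
#include <cstdio>

int main() {
  const unsigned VLenMin = 64;               // minimum VLEN assumed here
  const unsigned BankFromVR = 1 * VLenMin;   // bank sized from VR (LMUL=1)
  const unsigned BankFromVRM8 = 8 * VLenMin; // bank sized from VRM8 (LMUL=8)

  // Known-minimum size of <vscale x 8 x s64> with vscale >= 1:
  // 8 lanes * 64 bits = 512 bits.
  const unsigned MinTypeBits = 8 * 64;

  assert(MinTypeBits > BankFromVR);    // overflows a VR-sized bank
  assert(MinTypeBits <= BankFromVRM8); // fits a VRM8-sized bank
  std::printf("VR-sized bank: %u bits, VRM8-sized bank: %u bits\n",
              BankFromVR, BankFromVRM8);
  return 0;
}

Sizing the bank from the widest grouping keeps any size-derived reasoning
about the bank consistent with values that occupy m2/m4/m8 register groups.
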
>From bafe16285e481acc073a5aa0efb5b40257aed692 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 13 Nov 2023 12:53:36 -0800
Subject: [PATCH 6/6] add more vector reg classes

---
 llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 094a6ed55baebdb..9902b0d94bc661c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -100,16 +100,19 @@ RISCVRegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
   case RISCV::FPR64CRegClassID:
   case RISCV::FPR32CRegClassID:
     return getRegBank(RISCV::FPRBRegBankID);
+  case RISCV::VMRegClassID:
   case RISCV::VRRegClassID:
   case RISCV::VRNoV0RegClassID:
   case RISCV::VRM2RegClassID:
   case RISCV::VRM2NoV0RegClassID:
   case RISCV::VRM4RegClassID:
   case RISCV::VRM4NoV0RegClassID:
+  case RISCV::VMV0RegClassID:
+  case RISCV::VRM2_with_sub_vrm1_0_in_VMV0RegClassID:
+  case RISCV::VRM4_with_sub_vrm1_0_in_VMV0RegClassID:
   case RISCV::VRM8RegClassID:
   case RISCV::VRM8NoV0RegClassID:
-  case RISCV::VMRegClassID:
-  case RISCV::VMV0RegClassID:
+  case RISCV::VRM8_with_sub_vrm1_0_in_VMV0RegClassID:
     return getRegBank(RISCV::VRBRegBankID);
   }
 }
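
To make the final shape of the mapping explicit: after this hunk every RVV
register class handled here, including the NoV0 variants and the
VRM*_with_sub_vrm1_0_in_VMV0 classes that TableGen synthesizes for
mask-constrained register groups, resolves to the single VRB bank. Below is a
toy standalone model of that many-to-one collapse; the enumerators are made
up for illustration and are not the real RISCV::*RegClassID values:

#include <cstdio>

// Stand-ins for the real register class and bank IDs.
enum ClassID { GPRCls, FPRCls, VRCls, VRNoV0Cls, VRM2Cls, VRM4Cls, VRM8Cls,
               VMV0Cls };
enum BankID { GPRB, FPRB, VRB };

static BankID bankFor(ClassID C) {
  switch (C) {
  case GPRCls:
    return GPRB;
  case FPRCls:
    return FPRB;
  default:
    return VRB; // every vector class, constrained or not, shares one bank
  }
}

int main() {
  std::printf("VMV0 maps to VRB: %s\n", bankFor(VMV0Cls) == VRB ? "yes" : "no");
  return 0;
}

Unlike this toy's default case, the hunk above enumerates each class ID
explicitly rather than relying on a catch-all, which keeps the mapping
auditable case by case.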
