[llvm] [RISCV][GISel] RegBank select and instruction select for vector G_ADD, G_SUB (PR #74114)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 31 11:35:26 PST 2024


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/74114

From 89c8fd2d7773abf559ec67d789806a9508722d55 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 5 Dec 2023 19:54:00 -0500
Subject: [PATCH 01/12] use TypeSize::isKnownGT to compare LLT and register class sizes

---
 llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 30b2430249d23..0b8d483e5923d 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -281,7 +281,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
     }
 
     const LLT Ty = MRI.getType(VReg);
-    if (Ty.isValid() && Ty.getSizeInBits() > TRI.getRegSizeInBits(*RC)) {
+    if (Ty.isValid() && TypeSize::isKnownGT(Ty.getSizeInBits(), TRI.getRegSizeInBits(*RC))) {
       reportGISelFailure(
           MF, TPC, MORE, "gisel-select",
           "VReg's low-level type and register class have different sizes", *MI);

From 73910bbd2b9121e9331487f71bc431aa06bfd8a8 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 08:23:48 -0500
Subject: [PATCH 02/12] getRegClassForTypeOnBank for vector register bank

---
 .../RISCV/GISel/RISCVInstructionSelector.cpp     | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 65a0a3e0929fc..bc80fb0204802 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -845,6 +845,22 @@ const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
   }
 
   // TODO: Non-GPR register classes.
+
+  if (RB.getID() == RISCV::VRBRegBankID) {
+    if (Ty.getSizeInBits().getKnownMinValue() <= 64) {
+      return &RISCV::VRRegClass;
+    }
+    if (Ty.getSizeInBits().getKnownMinValue() == 128) {
+      return &RISCV::VRM2RegClass;
+    }
+    if (Ty.getSizeInBits().getKnownMinValue() == 256) {
+      return &RISCV::VRM4RegClass;
+    }
+    if (Ty.getSizeInBits().getKnownMinValue() == 512) {
+      return &RISCV::VRM8RegClass;
+    }
+  }
+
   return nullptr;
 }
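
For context, illustrative rather than part of the patch: LLVM models RVV so
that one vscale unit corresponds to RISCV::RVVBitsPerBlock (64) bits, so the
known-minimum bit size of a scalable LLT directly encodes the register
grouping (LMUL) and hence the class chosen above. A small sketch (the header
path is an assumption):

    // <vscale x 1 x s8>  ->   8 bits -> LMUL <= 1 -> VRRegClass   (MF8 pseudos)
    // <vscale x 16 x s8> -> 128 bits -> LMUL == 2 -> VRM2RegClass (M2 pseudos)
    // <vscale x 8 x s64> -> 512 bits -> LMUL == 8 -> VRM8RegClass (M8 pseudos)
    #include "llvm/CodeGenTypes/LowLevelType.h"
    using namespace llvm;

    unsigned knownMinBits() {
      LLT Ty = LLT::scalable_vector(16, LLT::scalar(8)); // <vscale x 16 x s8>
      return Ty.getSizeInBits().getKnownMinValue();      // 128 -> VRM2RegClass
    }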
 

From f3644c976ec6b5f8e458f4b1cad1975277a8b95e Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 08:43:27 -0500
Subject: [PATCH 03/12] vector instruction selection for G_ADD and G_SUB

---
 .../instruction-select/rvv/add-rv32.mir       | 556 ++++++++++++++++++
 .../instruction-select/rvv/add-rv64.mir       | 556 ++++++++++++++++++
 .../instruction-select/rvv/sub-rv32.mir       | 556 ++++++++++++++++++
 .../instruction-select/rvv/sub-rv64.mir       | 556 ++++++++++++++++++
 4 files changed, 2224 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
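
The tests below follow one pattern per element type and LMUL: the
scalable-vector operands live in vrb (vector register bank) virtual registers,
a single G_ADD or G_SUB is selected, and the checks pin the expected
PseudoVADD_VV_*/PseudoVSUB_VV_* together with the register class implied by
LMUL. The trailing pseudo operands decode as the inline /* ... */ comments in
the checks indicate; a sketch of the encoding, with the helper name being
hypothetical:

    #include "llvm/Support/MathExtras.h"

    // An AVL of -1 requests VLMAX; the SEW operand is log2 of the element
    // width in bits (3 = e8, 4 = e16, 5 = e32, 6 = e64); policy 3 is
    // "ta, ma", i.e. TAIL_AGNOSTIC (1) | MASK_AGNOSTIC (2).
    unsigned sewOperandFor(unsigned ElementBits) {
      return llvm::Log2_32(ElementBits); // 8 -> 3, 16 -> 4, 32 -> 5, 64 -> 6
    }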

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
new file mode 100644
index 0000000000000..a21883f966d4c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name:            add_nxv1s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv1s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s8>) = COPY $x10
+    %1:vrb(<vscale x 1 x s8>) = COPY $x11
+    %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv2s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s8>) = COPY $x10
+    %1:vrb(<vscale x 2 x s8>) = COPY $x11
+    %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv4s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s8>) = COPY $x10
+    %1:vrb(<vscale x 4 x s8>) = COPY $x11
+    %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv8s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s8>) = COPY $x10
+    %1:vrb(<vscale x 8 x s8>) = COPY $x11
+    %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv16s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv16s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s8>) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $x11
+    %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv32s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv32s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s8>) = COPY $x10
+    %1:vrb(<vscale x 32 x s8>) = COPY $x11
+    %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv64s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv64s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 64 x s8>) = COPY $x10
+    %1:vrb(<vscale x 64 x s8>) = COPY $x11
+    %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv1s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv1s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s16>) = COPY $x10
+    %1:vrb(<vscale x 1 x s16>) = COPY $x11
+    %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv2s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s16>) = COPY $x10
+    %1:vrb(<vscale x 2 x s16>) = COPY $x11
+    %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv4s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s16>) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $x11
+    %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv8s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s16>) = COPY $x10
+    %1:vrb(<vscale x 8 x s16>) = COPY $x11
+    %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv16s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv16s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s16>) = COPY $x10
+    %1:vrb(<vscale x 16 x s16>) = COPY $x11
+    %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv32s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv32s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s16>) = COPY $x10
+    %1:vrb(<vscale x 32 x s16>) = COPY $x11
+    %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv1s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv1s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s32>) = COPY $x10
+    %1:vrb(<vscale x 1 x s32>) = COPY $x11
+    %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv2s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s32>) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $x11
+    %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv4s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s32>) = COPY $x10
+    %1:vrb(<vscale x 4 x s32>) = COPY $x11
+    %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv8s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s32>) = COPY $x10
+    %1:vrb(<vscale x 8 x s32>) = COPY $x11
+    %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv16s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv16s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s32>) = COPY $x10
+    %1:vrb(<vscale x 16 x s32>) = COPY $x11
+    %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv1s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv1s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s64>) = COPY $x10
+    %1:vrb(<vscale x 1 x s64>) = COPY $x11
+    %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv2s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s64>) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $x11
+    %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv4s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s64>) = COPY $x10
+    %1:vrb(<vscale x 4 x s64>) = COPY $x11
+    %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_nxv8s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s64>) = COPY $x10
+    %1:vrb(<vscale x 8 x s64>) = COPY $x11
+    %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
new file mode 100644
index 0000000000000..572d24ee4c7be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name:            add_nxv1s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv1s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s8>) = COPY $x10
+    %1:vrb(<vscale x 1 x s8>) = COPY $x11
+    %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv2s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s8>) = COPY $x10
+    %1:vrb(<vscale x 2 x s8>) = COPY $x11
+    %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv4s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s8>) = COPY $x10
+    %1:vrb(<vscale x 4 x s8>) = COPY $x11
+    %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv8s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s8>) = COPY $x10
+    %1:vrb(<vscale x 8 x s8>) = COPY $x11
+    %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv16s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv16s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s8>) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $x11
+    %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv32s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv32s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s8>) = COPY $x10
+    %1:vrb(<vscale x 32 x s8>) = COPY $x11
+    %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv64s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv64s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 64 x s8>) = COPY $x10
+    %1:vrb(<vscale x 64 x s8>) = COPY $x11
+    %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv1s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv1s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s16>) = COPY $x10
+    %1:vrb(<vscale x 1 x s16>) = COPY $x11
+    %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv2s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s16>) = COPY $x10
+    %1:vrb(<vscale x 2 x s16>) = COPY $x11
+    %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv4s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s16>) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $x11
+    %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv8s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s16>) = COPY $x10
+    %1:vrb(<vscale x 8 x s16>) = COPY $x11
+    %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv16s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv16s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s16>) = COPY $x10
+    %1:vrb(<vscale x 16 x s16>) = COPY $x11
+    %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv32s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv32s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s16>) = COPY $x10
+    %1:vrb(<vscale x 32 x s16>) = COPY $x11
+    %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv1s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv1s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s32>) = COPY $x10
+    %1:vrb(<vscale x 1 x s32>) = COPY $x11
+    %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv2s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s32>) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $x11
+    %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv4s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s32>) = COPY $x10
+    %1:vrb(<vscale x 4 x s32>) = COPY $x11
+    %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv8s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s32>) = COPY $x10
+    %1:vrb(<vscale x 8 x s32>) = COPY $x11
+    %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv16s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv16s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s32>) = COPY $x10
+    %1:vrb(<vscale x 16 x s32>) = COPY $x11
+    %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv1s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv1s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s64>) = COPY $x10
+    %1:vrb(<vscale x 1 x s64>) = COPY $x11
+    %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv2s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv2s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s64>) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $x11
+    %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv4s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv4s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s64>) = COPY $x10
+    %1:vrb(<vscale x 4 x s64>) = COPY $x11
+    %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_nxv8s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_nxv8s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s64>) = COPY $x10
+    %1:vrb(<vscale x 8 x s64>) = COPY $x11
+    %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
+    $x10 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
new file mode 100644
index 0000000000000..b08361138c77d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name:            sub_nxv1s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv1s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s8>) = COPY $x10
+    %1:vrb(<vscale x 1 x s8>) = COPY $x11
+    %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv2s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s8>) = COPY $x10
+    %1:vrb(<vscale x 2 x s8>) = COPY $x11
+    %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv4s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s8>) = COPY $x10
+    %1:vrb(<vscale x 4 x s8>) = COPY $x11
+    %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv8s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s8>) = COPY $x10
+    %1:vrb(<vscale x 8 x s8>) = COPY $x11
+    %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv16s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv16s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s8>) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $x11
+    %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv32s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv32s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s8>) = COPY $x10
+    %1:vrb(<vscale x 32 x s8>) = COPY $x11
+    %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv64s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv64s8
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 64 x s8>) = COPY $x10
+    %1:vrb(<vscale x 64 x s8>) = COPY $x11
+    %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv1s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv1s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s16>) = COPY $x10
+    %1:vrb(<vscale x 1 x s16>) = COPY $x11
+    %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv2s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s16>) = COPY $x10
+    %1:vrb(<vscale x 2 x s16>) = COPY $x11
+    %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv4s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s16>) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $x11
+    %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv8s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s16>) = COPY $x10
+    %1:vrb(<vscale x 8 x s16>) = COPY $x11
+    %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv16s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv16s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s16>) = COPY $x10
+    %1:vrb(<vscale x 16 x s16>) = COPY $x11
+    %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv32s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv32s16
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s16>) = COPY $x10
+    %1:vrb(<vscale x 32 x s16>) = COPY $x11
+    %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv1s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv1s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s32>) = COPY $x10
+    %1:vrb(<vscale x 1 x s32>) = COPY $x11
+    %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv2s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s32>) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $x11
+    %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv4s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s32>) = COPY $x10
+    %1:vrb(<vscale x 4 x s32>) = COPY $x11
+    %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv8s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s32>) = COPY $x10
+    %1:vrb(<vscale x 8 x s32>) = COPY $x11
+    %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv16s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv16s32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s32>) = COPY $x10
+    %1:vrb(<vscale x 16 x s32>) = COPY $x11
+    %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv1s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv1s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s64>) = COPY $x10
+    %1:vrb(<vscale x 1 x s64>) = COPY $x11
+    %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv2s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s64>) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $x11
+    %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv4s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s64>) = COPY $x10
+    %1:vrb(<vscale x 4 x s64>) = COPY $x11
+    %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_nxv8s64
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s64>) = COPY $x10
+    %1:vrb(<vscale x 8 x s64>) = COPY $x11
+    %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
new file mode 100644
index 0000000000000..71150f49a4f08
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name:            sub_nxv1s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv1s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s8>) = COPY $x10
+    %1:vrb(<vscale x 1 x s8>) = COPY $x11
+    %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv2s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s8>) = COPY $x10
+    %1:vrb(<vscale x 2 x s8>) = COPY $x11
+    %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv4s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s8>) = COPY $x10
+    %1:vrb(<vscale x 4 x s8>) = COPY $x11
+    %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv8s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s8>) = COPY $x10
+    %1:vrb(<vscale x 8 x s8>) = COPY $x11
+    %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv16s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv16s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s8>) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $x11
+    %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv32s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv32s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s8>) = COPY $x10
+    %1:vrb(<vscale x 32 x s8>) = COPY $x11
+    %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv64s8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv64s8
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 64 x s8>) = COPY $x10
+    %1:vrb(<vscale x 64 x s8>) = COPY $x11
+    %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv1s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv1s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s16>) = COPY $x10
+    %1:vrb(<vscale x 1 x s16>) = COPY $x11
+    %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv2s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s16>) = COPY $x10
+    %1:vrb(<vscale x 2 x s16>) = COPY $x11
+    %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv4s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s16>) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $x11
+    %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv8s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s16>) = COPY $x10
+    %1:vrb(<vscale x 8 x s16>) = COPY $x11
+    %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv16s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv16s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s16>) = COPY $x10
+    %1:vrb(<vscale x 16 x s16>) = COPY $x11
+    %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv32s16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv32s16
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 32 x s16>) = COPY $x10
+    %1:vrb(<vscale x 32 x s16>) = COPY $x11
+    %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv1s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv1s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s32>) = COPY $x10
+    %1:vrb(<vscale x 1 x s32>) = COPY $x11
+    %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv2s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s32>) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $x11
+    %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv4s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s32>) = COPY $x10
+    %1:vrb(<vscale x 4 x s32>) = COPY $x11
+    %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv8s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s32>) = COPY $x10
+    %1:vrb(<vscale x 8 x s32>) = COPY $x11
+    %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv16s32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv16s32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 16 x s32>) = COPY $x10
+    %1:vrb(<vscale x 16 x s32>) = COPY $x11
+    %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv1s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv1s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 1 x s64>) = COPY $x10
+    %1:vrb(<vscale x 1 x s64>) = COPY $x11
+    %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv2s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv2s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 2 x s64>) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $x11
+    %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv4s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv4s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 4 x s64>) = COPY $x10
+    %1:vrb(<vscale x 4 x s64>) = COPY $x11
+    %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_nxv8s64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_nxv8s64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:vrb(<vscale x 8 x s64>) = COPY $x10
+    %1:vrb(<vscale x 8 x s64>) = COPY $x11
+    %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
+    $x10 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $x10
+
+...
+---
+
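A note on the CHECK lines above, since every selected pseudo repeats the same operand shape: the PseudoVADD_VV_* and PseudoVSUB_VV_* operands are (passthru, lhs, rhs, AVL, SEW, policy). The sketch below decodes the three trailing immediates; it is a minimal standalone illustration with assumed constant names, not the in-tree definitions.

  #include <cstdint>

  // AVL operand: the tests pass -1, which by convention requests VLMAX,
  // i.e. operate on as many elements as the SEW/LMUL setting allows.
  constexpr int64_t kVLMaxSentinel = -1;

  // Policy operand: bit 0 = tail agnostic, bit 1 = mask agnostic, so the
  // value 3 matches the "/* ta, ma */" comments in the checks.
  constexpr uint64_t kTailAgnostic = 1;
  constexpr uint64_t kMaskAgnostic = 2;

  // SEW operand: encoded as log2(SEW), so e8 -> 3, e16 -> 4, e32 -> 5,
  // e64 -> 6, matching the "/* e16 */" style comments in the checks.
  constexpr unsigned log2SEW(unsigned SEWBits) {
    unsigned L = 0;
    while ((1u << L) < SEWBits)
      ++L;
    return L;
  }

  static_assert(log2SEW(8) == 3 && log2SEW(64) == 6, "e8 -> 3, e64 -> 6");
  static_assert((kTailAgnostic | kMaskAgnostic) == 3, "policy 3 == ta, ma");
  static_assert(kVLMaxSentinel == -1, "AVL of -1 selects VLMAX");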

>From e7a43e66ecfd9356bb771411c2b199113eeff208 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Sun, 10 Dec 2023 09:08:52 -0500
Subject: [PATCH 04/12] getInstrMapping for vector add and sub; regbankselect
 test cases for vector add and sub

---
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |  59 +-
 .../regbankselect/vec-add-sub-rv32.mir        | 511 ++++++++++++++++++
 .../regbankselect/vec-add-sub-rv64.mir        | 511 ++++++++++++++++++
 3 files changed, 1076 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index cf0ff63a5e51c..fe3038570357d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -25,10 +25,9 @@ namespace llvm {
 namespace RISCV {
 
 const RegisterBankInfo::PartialMapping PartMappings[] = {
-    {0, 32, GPRBRegBank},
-    {0, 64, GPRBRegBank},
-    {0, 32, FPRBRegBank},
-    {0, 64, FPRBRegBank},
+    {0, 32, GPRBRegBank}, {0, 64, GPRBRegBank}, {0, 32, FPRBRegBank},
+    {0, 64, FPRBRegBank}, {0, 64, VRBRegBank},  {0, 128, VRBRegBank},
+    {0, 256, VRBRegBank}, {0, 512, VRBRegBank},
 };
 
 enum PartialMappingIdx {
@@ -36,6 +35,10 @@ enum PartialMappingIdx {
   PMI_GPRB64 = 1,
   PMI_FPRB32 = 2,
   PMI_FPRB64 = 3,
+  PMI_VRB64 = 4,
+  PMI_VRB128 = 5,
+  PMI_VRB256 = 6,
+  PMI_VRB512 = 7,
 };
 
 const RegisterBankInfo::ValueMapping ValueMappings[] = {
@@ -57,6 +60,22 @@ const RegisterBankInfo::ValueMapping ValueMappings[] = {
     {&PartMappings[PMI_FPRB64], 1},
     {&PartMappings[PMI_FPRB64], 1},
     {&PartMappings[PMI_FPRB64], 1},
+    // Maximum 3 VR LMUL=1 operands.
+    {&PartMappings[PMI_VRB64], 1},
+    {&PartMappings[PMI_VRB64], 1},
+    {&PartMappings[PMI_VRB64], 1},
+    // Maximum 3 VR LMUL=2 operands.
+    {&PartMappings[PMI_VRB128], 1},
+    {&PartMappings[PMI_VRB128], 1},
+    {&PartMappings[PMI_VRB128], 1},
+    // Maximum 3 VR LMUL=4 operands.
+    {&PartMappings[PMI_VRB256], 1},
+    {&PartMappings[PMI_VRB256], 1},
+    {&PartMappings[PMI_VRB256], 1},
+    // Maximum 3 VR LMUL=8 operands.
+    {&PartMappings[PMI_VRB512], 1},
+    {&PartMappings[PMI_VRB512], 1},
+    {&PartMappings[PMI_VRB512], 1},
 };
 
 enum ValueMappingIdx {
@@ -65,6 +84,10 @@ enum ValueMappingIdx {
   GPRB64Idx = 4,
   FPRB32Idx = 7,
   FPRB64Idx = 10,
+  VRB64Idx = 13,
+  VRB128Idx = 16,
+  VRB256Idx = 19,
+  VRB512Idx = 22,
 };
 } // namespace RISCV
 } // namespace llvm
@@ -215,6 +238,23 @@ bool RISCVRegisterBankInfo::anyUseOnlyUseFP(
       [&](const MachineInstr &UseMI) { return onlyUsesFP(UseMI, MRI, TRI); });
 }
 
+static const RegisterBankInfo::ValueMapping *getVRBValueMapping(unsigned Size) {
+  unsigned Idx;
+
+  if (Size <= 64)
+    Idx = RISCV::VRB64Idx;
+  else if (Size == 128)
+    Idx = RISCV::VRB128Idx;
+  else if (Size == 256)
+    Idx = RISCV::VRB256Idx;
+  else if (Size == 512)
+    Idx = RISCV::VRB512Idx;
+  else
+    llvm::report_fatal_error("Invalid Size");
+
+  return &RISCV::ValueMappings[Idx];
+}
+
 const RegisterBankInfo::InstructionMapping &
 RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   const unsigned Opc = MI.getOpcode();
@@ -242,7 +282,16 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
 
   switch (Opc) {
   case TargetOpcode::G_ADD:
-  case TargetOpcode::G_SUB:
+  case TargetOpcode::G_SUB: {
+    LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+    if (Ty.isVector()) {
+      return getInstructionMapping(
+          DefaultMappingID, /*Cost=*/1,
+          getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue()),
+          NumOperands);
+    }
+  }
+    LLVM_FALLTHROUGH;
   case TargetOpcode::G_SHL:
   case TargetOpcode::G_ASHR:
   case TargetOpcode::G_LSHR:
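
To make the size buckets in getVRBValueMapping concrete: LLVM sizes RVV scalable types in 64-bit vscale blocks, so a type's LMUL is its known-minimum size divided by 64. Everything at 64 known-minimum bits or below (nxv1s8 up through nxv8s8 and nxv1s64, i.e. LMUL <= 1 including the fractional cases) lives in a single VR register, while 128, 256, and 512 known-minimum bits select the grouped VRM2, VRM4, and VRM8 classes. A hypothetical standalone helper mirroring that bucketing, purely for illustration and not part of the patch:

  #include <cstdint>
  #include <stdexcept>
  #include <string>

  // Map a scalable type's known-minimum size in bits to the register class
  // family the patch picks for it (one 64-bit block per vscale unit).
  std::string vrClassForKnownMinBits(uint64_t KnownMinBits) {
    if (KnownMinBits <= 64)
      return "VR";    // LMUL <= 1, e.g. nxv4s8 or nxv1s64
    if (KnownMinBits == 128)
      return "VRM2";  // LMUL == 2, e.g. nxv16s8 or nxv2s64
    if (KnownMinBits == 256)
      return "VRM4";  // LMUL == 4
    if (KnownMinBits == 512)
      return "VRM8";  // LMUL == 8
    throw std::invalid_argument("unsupported scalable vector size");
  }
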
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir
new file mode 100644
index 0000000000000..d45009a417297
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir
@@ -0,0 +1,511 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+
+---
+name:            add_nxv1s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv1s8
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s8>) = COPY $v10
+    %1:_(<vscale x 1 x s8>) = COPY $v11
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv2s8
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s8>) = COPY $v10
+    %1:_(<vscale x 2 x s8>) = COPY $v11
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv4s8
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s8>) = COPY $v10
+    %1:_(<vscale x 4 x s8>) = COPY $v11
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv8s8
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s8>) = COPY $v10
+    %1:_(<vscale x 8 x s8>) = COPY $v11
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv16s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv16s8
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 16 x s8>) = COPY $v10
+    %1:_(<vscale x 16 x s8>) = COPY $v11
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv32s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv32s8
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 32 x s8>) = COPY $v10
+    %1:_(<vscale x 32 x s8>) = COPY $v11
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv64s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv64s8
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 64 x s8>) = COPY $v10
+    %1:_(<vscale x 64 x s8>) = COPY $v11
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv1s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv1s16
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s16>) = COPY $v10
+    %1:_(<vscale x 1 x s16>) = COPY $v11
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv2s16
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s16>) = COPY $v10
+    %1:_(<vscale x 2 x s16>) = COPY $v11
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv4s16
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s16>) = COPY $v10
+    %1:_(<vscale x 4 x s16>) = COPY $v11
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv8s16
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s16>) = COPY $v10
+    %1:_(<vscale x 8 x s16>) = COPY $v11
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv16s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv16s16
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 16 x s16>) = COPY $v10
+    %1:_(<vscale x 16 x s16>) = COPY $v11
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv32s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv32s16
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 32 x s16>) = COPY $v10
+    %1:_(<vscale x 32 x s16>) = COPY $v11
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv1s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv1s32
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s32>) = COPY $v10
+    %1:_(<vscale x 1 x s32>) = COPY $v11
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv2s32
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s32>) = COPY $v10
+    %1:_(<vscale x 2 x s32>) = COPY $v11
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv4s32
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s32>) = COPY $v10
+    %1:_(<vscale x 4 x s32>) = COPY $v11
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv8s32
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s32>) = COPY $v10
+    %1:_(<vscale x 8 x s32>) = COPY $v11
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv16s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv16s32
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 16 x s32>) = COPY $v10
+    %1:_(<vscale x 16 x s32>) = COPY $v11
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv1s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv1s64
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s64>) = COPY $v10
+    %1:_(<vscale x 1 x s64>) = COPY $v11
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv2s64
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s64>) = COPY $v10
+    %1:_(<vscale x 2 x s64>) = COPY $v11
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: add_nxv4s64
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s64>) = COPY $v10
+    %1:_(<vscale x 4 x s64>) = COPY $v11
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV32I-LABEL: name: sub_nxv8s64
+    ; RV32I: liveins: $v10, $v11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s64>) = COPY $v10
+    %1:_(<vscale x 8 x s64>) = COPY $v11
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir
new file mode 100644
index 0000000000000..4233e0557ab43
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir
@@ -0,0 +1,511 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name:            add_nxv1s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv1s8
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s8>) = COPY $v10
+    %1:_(<vscale x 1 x s8>) = COPY $v11
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv2s8
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s8>) = COPY $v10
+    %1:_(<vscale x 2 x s8>) = COPY $v11
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv4s8
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s8>) = COPY $v10
+    %1:_(<vscale x 4 x s8>) = COPY $v11
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv8s8
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s8>) = COPY $v10
+    %1:_(<vscale x 8 x s8>) = COPY $v11
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv16s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv16s8
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 16 x s8>) = COPY $v10
+    %1:_(<vscale x 16 x s8>) = COPY $v11
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv32s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv32s8
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 32 x s8>) = COPY $v10
+    %1:_(<vscale x 32 x s8>) = COPY $v11
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv64s8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv64s8
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 64 x s8>) = COPY $v10
+    %1:_(<vscale x 64 x s8>) = COPY $v11
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv1s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv1s16
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s16>) = COPY $v10
+    %1:_(<vscale x 1 x s16>) = COPY $v11
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv2s16
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s16>) = COPY $v10
+    %1:_(<vscale x 2 x s16>) = COPY $v11
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv4s16
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s16>) = COPY $v10
+    %1:_(<vscale x 4 x s16>) = COPY $v11
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv8s16
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s16>) = COPY $v10
+    %1:_(<vscale x 8 x s16>) = COPY $v11
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv16s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv16s16
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 16 x s16>) = COPY $v10
+    %1:_(<vscale x 16 x s16>) = COPY $v11
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv32s16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv32s16
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 32 x s16>) = COPY $v10
+    %1:_(<vscale x 32 x s16>) = COPY $v11
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv1s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv1s32
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s32>) = COPY $v10
+    %1:_(<vscale x 1 x s32>) = COPY $v11
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv2s32
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s32>) = COPY $v10
+    %1:_(<vscale x 2 x s32>) = COPY $v11
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv4s32
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s32>) = COPY $v10
+    %1:_(<vscale x 4 x s32>) = COPY $v11
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv8s32
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s32>) = COPY $v10
+    %1:_(<vscale x 8 x s32>) = COPY $v11
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv16s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv16s32
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 16 x s32>) = COPY $v10
+    %1:_(<vscale x 16 x s32>) = COPY $v11
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv1s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv1s64
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 1 x s64>) = COPY $v10
+    %1:_(<vscale x 1 x s64>) = COPY $v11
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv2s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv2s64
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 2 x s64>) = COPY $v10
+    %1:_(<vscale x 2 x s64>) = COPY $v11
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            add_nxv4s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: add_nxv4s64
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 4 x s64>) = COPY $v10
+    %1:_(<vscale x 4 x s64>) = COPY $v11
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    $v10 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v10
+
+...
+---
+name:            sub_nxv8s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v10, $v11
+
+    ; RV64I-LABEL: name: sub_nxv8s64
+    ; RV64I: liveins: $v10, $v11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v10
+    %0:_(<vscale x 8 x s64>) = COPY $v10
+    %1:_(<vscale x 8 x s64>) = COPY $v11
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    $v10 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v10
+
+...

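Note on the tests above: they check that RegBankSelect keeps scalable-vector
G_ADD/G_SUB operands on the vrb (vector) register bank. For context, a minimal
sketch of the kind of mapping hook these tests exercise, assuming a
hypothetical getVRBValueMapping helper that returns a VRB ValueMapping sized by
the type's known-minimum bits; the actual patch may differ in detail:

    const RegisterBankInfo::InstructionMapping &
    RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
      const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
      switch (MI.getOpcode()) {
      case TargetOpcode::G_ADD:
      case TargetOpcode::G_SUB: {
        LLT Ty = MRI.getType(MI.getOperand(0).getReg());
        if (Ty.isVector()) {
          // getVRBValueMapping is a hypothetical helper: a ValueMapping onto
          // the vector bank, keyed on the scalable type's known-minimum size.
          const ValueMapping *VRBMap =
              getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue());
          // All three operands (def, lhs, rhs) live on the vector bank.
          return getInstructionMapping(DefaultMappingID, /*Cost=*/1,
                                       getOperandsMapping({VRBMap, VRBMap, VRBMap}),
                                       /*NumOperands=*/3);
        }
        break;
      }
      default:
        break;
      }
      // Scalar operands keep the existing GPR mapping.
      return getInstrMappingImpl(MI);
    }
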
>From 585d8dbe6d37829b360826820d7961afd67d5bd8 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 12 Dec 2023 10:30:44 -0500
Subject: [PATCH 05/12] drop curly braces around single lines

---
 .../RISCV/GISel/RISCVInstructionSelector.cpp      | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index bc80fb0204802..b7608e7340635 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -847,18 +847,17 @@ const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
   // TODO: Non-GPR register classes.
 
   if (RB.getID() == RISCV::VRBRegBankID) {
-    if (Ty.getSizeInBits().getKnownMinValue() <= 64) {
+    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
       return &RISCV::VRRegClass;
-    }
-    if (Ty.getSizeInBits().getKnownMinValue() == 128) {
+
+    if (Ty.getSizeInBits().getKnownMinValue() == 128)
       return &RISCV::VRM2RegClass;
-    }
-    if (Ty.getSizeInBits().getKnownMinValue() == 256) {
+
+    if (Ty.getSizeInBits().getKnownMinValue() == 256)
       return &RISCV::VRM4RegClass;
-    }
-    if (Ty.getSizeInBits().getKnownMinValue() == 512) {
+
+    if (Ty.getSizeInBits().getKnownMinValue() == 512)
       return &RISCV::VRM8RegClass;
-    }
   }
 
   return nullptr;

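With the braces dropped, the function reads as a straight ladder from the
scalable type's known-minimum size to an LMUL register class: up to 64 bits
stays in VR, and 128/256/512 bits select the VRM2/VRM4/VRM8 groups. A small
sketch of the correspondence (illustration only, not part of the patch):

    // <vscale x 16 x s8> has a known-minimum size of 128 bits, so it lands
    // in VRM2 (an LMUL=2 register group); <vscale x 8 x s8> (64 bits)
    // stays in plain VR.
    LLT Ty = LLT::scalable_vector(16, LLT::scalar(8));
    assert(Ty.getSizeInBits().getKnownMinValue() == 128);
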
>From f20faf03b837657a3b2ee6a239b9496146fd119c Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 12 Dec 2023 12:40:47 -0500
Subject: [PATCH 06/12] correct vectorized G_ADD, G_SUB instruction selection
 test cases

---
 .../instruction-select/rvv/add-rv32.mir       | 556 -------------
 .../instruction-select/rvv/add-rv64.mir       | 556 -------------
 .../GlobalISel/instruction-select/rvv/add.mir | 774 ++++++++++++++++++
 .../instruction-select/rvv/sub-rv32.mir       | 556 -------------
 .../instruction-select/rvv/sub-rv64.mir       | 556 -------------
 .../GlobalISel/instruction-select/rvv/sub.mir | 774 ++++++++++++++++++
 6 files changed, 1548 insertions(+), 2224 deletions(-)
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir

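The substantive fix in this patch: the earlier tests materialized
scalable-vector values out of scalar GPRs, e.g.

    %0:vrb(<vscale x 1 x s8>) = COPY $x10

which is not a meaningful source for a vector-typed vreg. The regenerated
tests below copy from actual vector registers instead, using LMUL-appropriate
register groups ($v8/$v9 for LMUL <= 1, $v8m2/$v10m2 for LMUL=2, and so on):

    %0:vrb(<vscale x 1 x s8>) = COPY $v8

The RV32 and RV64 variants are also folded into single add.mir/sub.mir files
driven by two RUN lines with RV32I/RV64I check prefixes.
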
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
deleted file mode 100644
index a21883f966d4c..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
+++ /dev/null
@@ -1,556 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
-# RUN: | FileCheck -check-prefix=RV32I %s
-
----
-name:            add_nxv1s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv1s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s8>) = COPY $x10
-    %1:vrb(<vscale x 1 x s8>) = COPY $x11
-    %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv2s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s8>) = COPY $x10
-    %1:vrb(<vscale x 2 x s8>) = COPY $x11
-    %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv4s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s8>) = COPY $x10
-    %1:vrb(<vscale x 4 x s8>) = COPY $x11
-    %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv8s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s8>) = COPY $x10
-    %1:vrb(<vscale x 8 x s8>) = COPY $x11
-    %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv16s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv16s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s8>) = COPY $x10
-    %1:vrb(<vscale x 16 x s8>) = COPY $x11
-    %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 16 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv32s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv32s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s8>) = COPY $x10
-    %1:vrb(<vscale x 32 x s8>) = COPY $x11
-    %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 32 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv64s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv64s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 64 x s8>) = COPY $x10
-    %1:vrb(<vscale x 64 x s8>) = COPY $x11
-    %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 64 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv1s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv1s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s16>) = COPY $x10
-    %1:vrb(<vscale x 1 x s16>) = COPY $x11
-    %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv2s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s16>) = COPY $x10
-    %1:vrb(<vscale x 2 x s16>) = COPY $x11
-    %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv4s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s16>) = COPY $x10
-    %1:vrb(<vscale x 4 x s16>) = COPY $x11
-    %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv8s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s16>) = COPY $x10
-    %1:vrb(<vscale x 8 x s16>) = COPY $x11
-    %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv16s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv16s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s16>) = COPY $x10
-    %1:vrb(<vscale x 16 x s16>) = COPY $x11
-    %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 16 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv32s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv32s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s16>) = COPY $x10
-    %1:vrb(<vscale x 32 x s16>) = COPY $x11
-    %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 32 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv1s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv1s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s32>) = COPY $x10
-    %1:vrb(<vscale x 1 x s32>) = COPY $x11
-    %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv2s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s32>) = COPY $x10
-    %1:vrb(<vscale x 2 x s32>) = COPY $x11
-    %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv4s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s32>) = COPY $x10
-    %1:vrb(<vscale x 4 x s32>) = COPY $x11
-    %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv8s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s32>) = COPY $x10
-    %1:vrb(<vscale x 8 x s32>) = COPY $x11
-    %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv16s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv16s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s32>) = COPY $x10
-    %1:vrb(<vscale x 16 x s32>) = COPY $x11
-    %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 16 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv1s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv1s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s64>) = COPY $x10
-    %1:vrb(<vscale x 1 x s64>) = COPY $x11
-    %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv2s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s64>) = COPY $x10
-    %1:vrb(<vscale x 2 x s64>) = COPY $x11
-    %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv4s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s64>) = COPY $x10
-    %1:vrb(<vscale x 4 x s64>) = COPY $x11
-    %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: add_nxv8s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s64>) = COPY $x10
-    %1:vrb(<vscale x 8 x s64>) = COPY $x11
-    %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
deleted file mode 100644
index 572d24ee4c7be..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
+++ /dev/null
@@ -1,556 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
-# RUN: | FileCheck -check-prefix=RV64I %s
-
----
-name:            add_nxv1s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv1s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s8>) = COPY $x10
-    %1:vrb(<vscale x 1 x s8>) = COPY $x11
-    %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv2s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s8>) = COPY $x10
-    %1:vrb(<vscale x 2 x s8>) = COPY $x11
-    %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv4s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s8>) = COPY $x10
-    %1:vrb(<vscale x 4 x s8>) = COPY $x11
-    %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv8s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s8>) = COPY $x10
-    %1:vrb(<vscale x 8 x s8>) = COPY $x11
-    %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv16s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv16s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s8>) = COPY $x10
-    %1:vrb(<vscale x 16 x s8>) = COPY $x11
-    %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 16 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv32s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv32s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s8>) = COPY $x10
-    %1:vrb(<vscale x 32 x s8>) = COPY $x11
-    %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 32 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv64s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv64s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 64 x s8>) = COPY $x10
-    %1:vrb(<vscale x 64 x s8>) = COPY $x11
-    %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 64 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv1s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv1s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s16>) = COPY $x10
-    %1:vrb(<vscale x 1 x s16>) = COPY $x11
-    %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv2s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s16>) = COPY $x10
-    %1:vrb(<vscale x 2 x s16>) = COPY $x11
-    %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv4s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s16>) = COPY $x10
-    %1:vrb(<vscale x 4 x s16>) = COPY $x11
-    %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv8s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s16>) = COPY $x10
-    %1:vrb(<vscale x 8 x s16>) = COPY $x11
-    %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv16s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv16s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s16>) = COPY $x10
-    %1:vrb(<vscale x 16 x s16>) = COPY $x11
-    %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 16 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv32s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv32s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s16>) = COPY $x10
-    %1:vrb(<vscale x 32 x s16>) = COPY $x11
-    %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 32 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv1s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv1s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s32>) = COPY $x10
-    %1:vrb(<vscale x 1 x s32>) = COPY $x11
-    %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv2s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s32>) = COPY $x10
-    %1:vrb(<vscale x 2 x s32>) = COPY $x11
-    %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv4s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s32>) = COPY $x10
-    %1:vrb(<vscale x 4 x s32>) = COPY $x11
-    %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv8s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s32>) = COPY $x10
-    %1:vrb(<vscale x 8 x s32>) = COPY $x11
-    %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv16s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv16s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s32>) = COPY $x10
-    %1:vrb(<vscale x 16 x s32>) = COPY $x11
-    %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 16 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv1s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv1s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s64>) = COPY $x10
-    %1:vrb(<vscale x 1 x s64>) = COPY $x11
-    %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 1 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv2s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv2s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s64>) = COPY $x10
-    %1:vrb(<vscale x 2 x s64>) = COPY $x11
-    %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 2 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv4s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv4s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s64>) = COPY $x10
-    %1:vrb(<vscale x 4 x s64>) = COPY $x11
-    %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 4 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            add_nxv8s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: add_nxv8s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s64>) = COPY $x10
-    %1:vrb(<vscale x 8 x s64>) = COPY $x11
-    %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
-    $x10 = COPY %2(<vscale x 8 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir
new file mode 100644
index 0000000000000..5b493f6844c01
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir
@@ -0,0 +1,774 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+---
+name:            test_nxv1i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s8>) = COPY $v8
+    %1:vrb(<vscale x 1 x s8>) = COPY $v9
+    %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv2i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv2i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 2 x s8>) = COPY $v8
+    %1:vrb(<vscale x 2 x s8>) = COPY $v9
+    %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv4i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv4i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv4i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 4 x s8>) = COPY $v8
+    %1:vrb(<vscale x 4 x s8>) = COPY $v9
+    %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv8i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv8i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv8i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 8 x s8>) = COPY $v8
+    %1:vrb(<vscale x 8 x s8>) = COPY $v9
+    %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv16i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv16i8
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv16i8
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    %1:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv32i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv32i8
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv32i8
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    %1:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv64i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv64i8
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv64i8
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    %1:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_nxv1i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s16>) = COPY $v8
+    %1:vrb(<vscale x 1 x s16>) = COPY $v9
+    %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv2i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv2i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 2 x s16>) = COPY $v8
+    %1:vrb(<vscale x 2 x s16>) = COPY $v9
+    %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv4i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv4i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 4 x s16>) = COPY $v8
+    %1:vrb(<vscale x 4 x s16>) = COPY $v9
+    %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv8i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv8i16
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv8i16
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    %1:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv16i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv16i16
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv16i16
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    %1:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv32i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv32i16
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv32i16
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    %1:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_nxv1i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s32>) = COPY $v8
+    %1:vrb(<vscale x 1 x s32>) = COPY $v9
+    %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv2i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv2i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 2 x s32>) = COPY $v8
+    %1:vrb(<vscale x 2 x s32>) = COPY $v9
+    %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv4i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv4i32
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv4i32
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    %1:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv8i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv8i32
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv8i32
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    %1:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv16i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv16i32
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv16i32
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    %1:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_nxv1i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i64
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i64
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s64>) = COPY $v8
+    %1:vrb(<vscale x 1 x s64>) = COPY $v9
+    %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv2i64
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv2i64
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %1:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv4i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv4i64
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv4i64
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    %1:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv8i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv8i64
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv8i64
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    %1:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+
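
[Reviewer note, illustrative only and not part of the patch: each PseudoVADD_VV_* / PseudoVSUB_VV_* checked above takes a passthru (the IMPLICIT_DEF), the two vector sources, an AVL of -1 (VLMAX), a SEW immediate equal to log2 of the element width (e8 -> 3 ... e64 -> 6), and the policy immediate 3 ("ta, ma": tail agnostic, mask agnostic). The LMUL suffix on the pseudo and register class tracks the type's known-minimum size in bits, with 64 bits mapping to M1. A hypothetical C++ sketch of both mappings, with names of my own invention rather than anything from this patch:

static unsigned sewImm(unsigned EltBits) {
  // log2 of the element width: 8 -> 3 (e8), 16 -> 4 (e16),
  // 32 -> 5 (e32), 64 -> 6 (e64), matching the immediates in the
  // CHECK lines above.
  unsigned Imm = 0;
  while ((1u << Imm) < EltBits)
    ++Imm;
  return Imm;
}

static const char *lmulSuffix(unsigned KnownMinBits) {
  // Known-minimum size of the scalable type -> pseudo/regclass suffix,
  // e.g. <vscale x 4 x s8> is 32 bits known-min -> MF2 (vr), and
  // <vscale x 16 x s8> is 128 bits known-min -> M2 (vrm2).
  if (KnownMinBits <= 8)   return "MF8";
  if (KnownMinBits == 16)  return "MF4";
  if (KnownMinBits == 32)  return "MF2";
  if (KnownMinBits == 64)  return "M1";
  if (KnownMinBits == 128) return "M2";
  if (KnownMinBits == 256) return "M4";
  return "M8";
}

For example, test_nxv8i16 above (<vscale x 8 x s16>, 128 bits known-min, e16) lands on PseudoVADD_VV_M2 with SEW immediate 4, consistent with both helpers.]
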
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
deleted file mode 100644
index b08361138c77d..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
+++ /dev/null
@@ -1,556 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
-# RUN: | FileCheck -check-prefix=RV32I %s
-
----
-name:            sub_nxv1s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv1s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s8>) = COPY $x10
-    %1:vrb(<vscale x 1 x s8>) = COPY $x11
-    %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv2s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s8>) = COPY $x10
-    %1:vrb(<vscale x 2 x s8>) = COPY $x11
-    %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv4s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s8>) = COPY $x10
-    %1:vrb(<vscale x 4 x s8>) = COPY $x11
-    %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv8s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s8>) = COPY $x10
-    %1:vrb(<vscale x 8 x s8>) = COPY $x11
-    %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv16s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv16s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s8>) = COPY $x10
-    %1:vrb(<vscale x 16 x s8>) = COPY $x11
-    %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 16 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv32s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv32s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s8>) = COPY $x10
-    %1:vrb(<vscale x 32 x s8>) = COPY $x11
-    %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 32 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv64s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv64s8
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 64 x s8>) = COPY $x10
-    %1:vrb(<vscale x 64 x s8>) = COPY $x11
-    %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 64 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv1s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv1s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s16>) = COPY $x10
-    %1:vrb(<vscale x 1 x s16>) = COPY $x11
-    %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv2s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s16>) = COPY $x10
-    %1:vrb(<vscale x 2 x s16>) = COPY $x11
-    %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv4s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s16>) = COPY $x10
-    %1:vrb(<vscale x 4 x s16>) = COPY $x11
-    %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv8s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s16>) = COPY $x10
-    %1:vrb(<vscale x 8 x s16>) = COPY $x11
-    %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv16s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv16s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s16>) = COPY $x10
-    %1:vrb(<vscale x 16 x s16>) = COPY $x11
-    %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 16 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv32s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv32s16
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s16>) = COPY $x10
-    %1:vrb(<vscale x 32 x s16>) = COPY $x11
-    %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 32 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv1s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv1s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s32>) = COPY $x10
-    %1:vrb(<vscale x 1 x s32>) = COPY $x11
-    %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv2s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s32>) = COPY $x10
-    %1:vrb(<vscale x 2 x s32>) = COPY $x11
-    %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv4s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s32>) = COPY $x10
-    %1:vrb(<vscale x 4 x s32>) = COPY $x11
-    %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv8s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s32>) = COPY $x10
-    %1:vrb(<vscale x 8 x s32>) = COPY $x11
-    %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv16s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv16s32
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s32>) = COPY $x10
-    %1:vrb(<vscale x 16 x s32>) = COPY $x11
-    %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 16 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv1s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv1s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s64>) = COPY $x10
-    %1:vrb(<vscale x 1 x s64>) = COPY $x11
-    %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv2s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s64>) = COPY $x10
-    %1:vrb(<vscale x 2 x s64>) = COPY $x11
-    %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv4s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s64>) = COPY $x10
-    %1:vrb(<vscale x 4 x s64>) = COPY $x11
-    %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV32I-LABEL: name: sub_nxv8s64
-    ; RV32I: liveins: $x10, $x11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV32I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s64>) = COPY $x10
-    %1:vrb(<vscale x 8 x s64>) = COPY $x11
-    %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-
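
[Reviewer note, not part of the patch: the sub tests deleted here take their vector operands from the scalar registers $x10/$x11, copying a GPR directly into a vr/vrm2/vrm4/vrm8 class, whereas the add tests above pass scalable vectors in $v8/$v9 and the m2/m4/m8 register groups.]
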
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
deleted file mode 100644
index 71150f49a4f08..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
+++ /dev/null
@@ -1,556 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
-# RUN: | FileCheck -check-prefix=RV64I %s
-
----
-name:            sub_nxv1s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv1s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s8>) = COPY $x10
-    %1:vrb(<vscale x 1 x s8>) = COPY $x11
-    %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv2s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s8>) = COPY $x10
-    %1:vrb(<vscale x 2 x s8>) = COPY $x11
-    %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv4s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s8>) = COPY $x10
-    %1:vrb(<vscale x 4 x s8>) = COPY $x11
-    %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv8s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s8>) = COPY $x10
-    %1:vrb(<vscale x 8 x s8>) = COPY $x11
-    %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv16s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv16s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s8>) = COPY $x10
-    %1:vrb(<vscale x 16 x s8>) = COPY $x11
-    %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 16 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv32s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv32s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s8>) = COPY $x10
-    %1:vrb(<vscale x 32 x s8>) = COPY $x11
-    %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 32 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv64s8
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv64s8
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 64 x s8>) = COPY $x10
-    %1:vrb(<vscale x 64 x s8>) = COPY $x11
-    %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 64 x s8>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv1s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv1s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s16>) = COPY $x10
-    %1:vrb(<vscale x 1 x s16>) = COPY $x11
-    %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv2s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s16>) = COPY $x10
-    %1:vrb(<vscale x 2 x s16>) = COPY $x11
-    %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv4s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s16>) = COPY $x10
-    %1:vrb(<vscale x 4 x s16>) = COPY $x11
-    %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv8s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s16>) = COPY $x10
-    %1:vrb(<vscale x 8 x s16>) = COPY $x11
-    %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv16s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv16s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s16>) = COPY $x10
-    %1:vrb(<vscale x 16 x s16>) = COPY $x11
-    %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 16 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv32s16
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv32s16
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 32 x s16>) = COPY $x10
-    %1:vrb(<vscale x 32 x s16>) = COPY $x11
-    %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 32 x s16>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv1s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv1s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s32>) = COPY $x10
-    %1:vrb(<vscale x 1 x s32>) = COPY $x11
-    %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv2s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s32>) = COPY $x10
-    %1:vrb(<vscale x 2 x s32>) = COPY $x11
-    %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv4s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s32>) = COPY $x10
-    %1:vrb(<vscale x 4 x s32>) = COPY $x11
-    %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv8s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s32>) = COPY $x10
-    %1:vrb(<vscale x 8 x s32>) = COPY $x11
-    %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv16s32
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv16s32
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 16 x s32>) = COPY $x10
-    %1:vrb(<vscale x 16 x s32>) = COPY $x11
-    %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 16 x s32>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv1s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv1s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 1 x s64>) = COPY $x10
-    %1:vrb(<vscale x 1 x s64>) = COPY $x11
-    %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 1 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv2s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv2s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 2 x s64>) = COPY $x10
-    %1:vrb(<vscale x 2 x s64>) = COPY $x11
-    %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 2 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv4s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv4s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 4 x s64>) = COPY $x10
-    %1:vrb(<vscale x 4 x s64>) = COPY $x11
-    %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 4 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-name:            sub_nxv8s64
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $x10, $x11
-
-    ; RV64I-LABEL: name: sub_nxv8s64
-    ; RV64I: liveins: $x10, $x11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
-    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
-    ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
-    ; RV64I-NEXT: PseudoRET implicit $x10
-    %0:vrb(<vscale x 8 x s64>) = COPY $x10
-    %1:vrb(<vscale x 8 x s64>) = COPY $x11
-    %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
-    $x10 = COPY %2(<vscale x 8 x s64>)
-    PseudoRET implicit $x10
-
-...
----
-
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir
new file mode 100644
index 0000000000000..9f35ba9ef6c07
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir
@@ -0,0 +1,774 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+---
+name:            test_nxv1i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s8>) = COPY $v8
+    %1:vrb(<vscale x 1 x s8>) = COPY $v9
+    %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv2i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv2i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 2 x s8>) = COPY $v8
+    %1:vrb(<vscale x 2 x s8>) = COPY $v9
+    %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv4i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv4i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv4i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 4 x s8>) = COPY $v8
+    %1:vrb(<vscale x 4 x s8>) = COPY $v9
+    %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv8i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv8i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv8i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 8 x s8>) = COPY $v8
+    %1:vrb(<vscale x 8 x s8>) = COPY $v9
+    %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv16i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv16i8
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv16i8
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    %1:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv32i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv32i8
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv32i8
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    %1:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv64i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv64i8
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv64i8
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    %1:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_nxv1i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s16>) = COPY $v8
+    %1:vrb(<vscale x 1 x s16>) = COPY $v9
+    %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv2i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv2i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 2 x s16>) = COPY $v8
+    %1:vrb(<vscale x 2 x s16>) = COPY $v9
+    %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv4i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv4i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 4 x s16>) = COPY $v8
+    %1:vrb(<vscale x 4 x s16>) = COPY $v9
+    %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv8i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv8i16
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv8i16
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    %1:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv16i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv16i16
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv16i16
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    %1:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv32i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv32i16
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv32i16
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    %1:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_nxv1i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s32>) = COPY $v8
+    %1:vrb(<vscale x 1 x s32>) = COPY $v9
+    %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv2i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv2i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 2 x s32>) = COPY $v8
+    %1:vrb(<vscale x 2 x s32>) = COPY $v9
+    %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv4i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv4i32
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv4i32
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    %1:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv8i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv8i32
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv8i32
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    %1:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv16i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv16i32
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv16i32
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    %1:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            test_nxv1i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: test_nxv1i64
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: test_nxv1i64
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:vrb(<vscale x 1 x s64>) = COPY $v8
+    %1:vrb(<vscale x 1 x s64>) = COPY $v9
+    %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            test_nxv2i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: test_nxv2i64
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: test_nxv2i64
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %1:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            test_nxv4i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: test_nxv4i64
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: test_nxv4i64
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    %1:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            test_nxv8i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: test_nxv8i64
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: test_nxv8i64
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+    ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    %1:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+

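For context on the expectations above: each scalable-vector LLT pairs with exactly one PseudoVSUB_VV_<suffix> pseudo, and the suffix follows directly from the type's known-minimum size in bits (8 maps to MF8, 16 to MF4, 32 to MF2, 64 to M1, 128 to M2, 256 to M4, 512 to M8). Below is a minimal standalone sketch of that relationship; it is illustrative code, not LLVM's, and the helper name lmulSuffix is made up:

  #include <cassert>
  #include <cstdint>
  #include <iostream>
  #include <string>

  // Illustrative only: maps the known-minimum size in bits of a scalable
  // vector type (element count x element size) to the LMUL suffix of the
  // RVV pseudo checked in the tests above.
  static std::string lmulSuffix(uint64_t KnownMinBits) {
    switch (KnownMinBits) {
    case 8:   return "MF8";
    case 16:  return "MF4";
    case 32:  return "MF2";
    case 64:  return "M1";
    case 128: return "M2";
    case 256: return "M4";
    case 512: return "M8";
    default:  assert(false && "unexpected size"); return "";
    }
  }

  int main() {
    // <vscale x 1 x s8> -> 8 bits -> PseudoVSUB_VV_MF8 (test_nxv1i8 above).
    std::cout << lmulSuffix(1 * 8) << "\n";
    // <vscale x 8 x s16> -> 128 bits -> PseudoVSUB_VV_M2 (test_nxv8i16 above).
    std::cout << lmulSuffix(8 * 16) << "\n";
  }
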
>From 177d866b655732e5cd482cc0d8136b179c6891f4 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 12 Dec 2023 12:42:23 -0500
Subject: [PATCH 07/12] partial mapping VRB64 is for LMUL=1, MF2, MF4, and MF8

---
 llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index fe3038570357d..3fcceb3f07358 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -60,7 +60,7 @@ const RegisterBankInfo::ValueMapping ValueMappings[] = {
     {&PartMappings[PMI_FPRB64], 1},
     {&PartMappings[PMI_FPRB64], 1},
     {&PartMappings[PMI_FPRB64], 1},
-    // Maximum 3 VR LMUL=1 operands.
+    // Maximum 3 VR LMUL={1, MF2, MF4, MF8} operands.
     {&PartMappings[PMI_VRB64], 1},
     {&PartMappings[PMI_VRB64], 1},
     {&PartMappings[PMI_VRB64], 1},

>From 12ab39a405812ef897c520c6c413e018103f306b Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 15 Dec 2023 12:49:36 -0500
Subject: [PATCH 08/12] clang format

---
 llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 0b8d483e5923d..4cb1d01f3e8ca 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -281,7 +281,8 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
     }
 
     const LLT Ty = MRI.getType(VReg);
-    if (Ty.isValid() && TypeSize::isKnownGT(Ty.getSizeInBits(), TRI.getRegSizeInBits(*RC))) {
+    if (Ty.isValid() &&
+        TypeSize::isKnownGT(Ty.getSizeInBits(), TRI.getRegSizeInBits(*RC))) {
       reportGISelFailure(
           MF, TPC, MORE, "gisel-select",
           "VReg's low-level type and register class have different sizes", *MI);

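The isKnownGT call being reformatted in this hunk matters because a scalable size (vscale x N bits) and a fixed size are not totally ordered, so the comparison may only answer when the result holds for every vscale >= 1. Here is a minimal standalone model of that semantics; the real implementation is TypeSize in llvm/include/llvm/Support/TypeSize.h, and this sketch only mirrors its behavior:

  #include <cstdint>
  #include <iostream>

  // Minimal model of TypeSize::isKnownGT semantics (illustrative only).
  // A scalable quantity means vscale * MinBits for an unknown vscale >= 1.
  struct Size {
    uint64_t MinBits;
    bool Scalable;
  };

  // True only when LHS > RHS is guaranteed for every possible vscale.
  static bool isKnownGT(Size L, Size R) {
    // Both fixed, both scalable, or scalable vs. fixed: vscale = 1 is the
    // worst case for the left side, so comparing known minimums suffices.
    if (L.Scalable || !R.Scalable)
      return L.MinBits > R.MinBits;
    // Fixed LHS vs. scalable RHS: a large enough vscale can always flip
    // the answer, so "greater than" is never known.
    return false;
  }

  int main() {
    std::cout << std::boolalpha;
    Size ScalableTy{128, true}; // e.g. vscale x 128 bits, <vscale x 16 x s8>
    Size FixedTy{64, false};
    std::cout << isKnownGT(ScalableTy, FixedTy) << "\n";       // true for any vscale >= 1
    std::cout << isKnownGT(Size{256, false}, ScalableTy) << "\n"; // false: vscale = 2 already gives 256
  }
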
>From 944dd0da8bab1804c29a1650705ae2853bdd1114 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 2 Jan 2024 22:55:57 -0500
Subject: [PATCH 09/12] delete todo

---
 llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index b7608e7340635..5738f86e7e9ff 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -844,8 +844,6 @@ const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
       return &RISCV::FPR64RegClass;
   }
 
-  // TODO: Non-GPR register classes.
-
   if (RB.getID() == RISCV::VRBRegBankID) {
     if (Ty.getSizeInBits().getKnownMinValue() <= 64)
       return &RISCV::VRRegClass;

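With the TODO gone, the VRB branch shown above is the whole story for vector types: any known-minimum size up to 64 bits (LMUL=1 and the fractional LMULs) lands in VRRegClass, while 128, 256, and 512 bits select the grouped classes, which is exactly the vr/vrm2/vrm4/vrm8 split the instruction-select tests check. A standalone sketch of that grouping follows; the enum and helper here are invented for illustration:

  #include <cstdint>
  #include <iostream>

  // Illustrative only: the size-to-class grouping exercised by the
  // vr/vrm2/vrm4/vrm8 register classes in the tests above.
  enum class VRClass { VR, VRM2, VRM4, VRM8, None };

  static VRClass classForKnownMinBits(uint64_t Bits) {
    if (Bits <= 64)  return VRClass::VR;   // LMUL=1 and fractional LMULs
    if (Bits == 128) return VRClass::VRM2; // LMUL=2
    if (Bits == 256) return VRClass::VRM4; // LMUL=4
    if (Bits == 512) return VRClass::VRM8; // LMUL=8
    return VRClass::None;
  }

  int main() {
    std::cout << std::boolalpha;
    // <vscale x 4 x s16> = 64 bits known-min -> vr (test_nxv4i16 above).
    std::cout << (classForKnownMinBits(4 * 16) == VRClass::VR) << "\n";
    // <vscale x 16 x s32> = 512 bits known-min -> vrm8 (test_nxv16i32 above).
    std::cout << (classForKnownMinBits(16 * 32) == VRClass::VRM8) << "\n";
  }
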
>From fda95650edd462c1bd6096da784513ee13081654 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 2 Jan 2024 22:56:25 -0500
Subject: [PATCH 10/12] put partial mappings on separate lines

---
 llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 3fcceb3f07358..a4c7fdf83a958 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -25,9 +25,14 @@ namespace llvm {
 namespace RISCV {
 
 const RegisterBankInfo::PartialMapping PartMappings[] = {
-    {0, 32, GPRBRegBank}, {0, 64, GPRBRegBank}, {0, 32, FPRBRegBank},
-    {0, 64, FPRBRegBank}, {0, 64, VRBRegBank},  {0, 128, VRBRegBank},
-    {0, 256, VRBRegBank}, {0, 512, VRBRegBank},
+    {0, 32, GPRBRegBank},
+    {0, 64, GPRBRegBank},
+    {0, 32, FPRBRegBank},
+    {0, 64, FPRBRegBank},
+    {0, 64, VRBRegBank},
+    {0, 128, VRBRegBank},
+    {0, 256, VRBRegBank},
+    {0, 512, VRBRegBank},
 };
 
 enum PartialMappingIdx {

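One detail worth spelling out about the table above: the "Maximum 3 VR ... operands" comment from the earlier hunk reflects that each size's partial mapping is repeated three times in ValueMappings, because a binary vector operation such as G_ADD or G_SUB has three operands on the same bank, the def plus two uses, and an instruction's mapping points at consecutive entries. A hedged standalone sketch of that layout; the structure and names are simplified stand-ins, not LLVM's:

  #include <array>
  #include <iostream>

  // Illustrative only: one partial mapping per size, used three times over
  // so a binary op (def + two uses) can map its operands consistently.
  struct PartialMapping { unsigned StartIdx, Length; const char *Bank; };

  static const std::array<PartialMapping, 4> PartMappings = {{
      {0, 64, "VRB"}, {0, 128, "VRB"}, {0, 256, "VRB"}, {0, 512, "VRB"},
  }};

  int main() {
    // A G_ADD in the tests above uses the same mapping for all 3 operands.
    const PartialMapping &M = PartMappings[0]; // the 64-bit VRB entry
    for (int Op = 0; Op < 3; ++Op)             // def, lhs, rhs
      std::cout << "operand " << Op << " -> " << M.Bank << M.Length << "\n";
  }
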
>From 766a7aa4000162f3bf8c72172f5bf3f587862ac9 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Thu, 11 Jan 2024 01:35:54 -0500
Subject: [PATCH 11/12] regbankselect test cases for vectorized G_ADD and G_SUB

---
 .../GlobalISel/regbankselect/rvv/add.mir      | 711 ++++++++++++++++++
 .../GlobalISel/regbankselect/rvv/sub.mir      | 711 ++++++++++++++++++
 .../regbankselect/vec-add-sub-rv32.mir        | 511 -------------
 .../regbankselect/vec-add-sub-rv64.mir        | 511 -------------
 4 files changed, 1422 insertions(+), 1022 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
new file mode 100644
index 0000000000000..049060b79bf6f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
@@ -0,0 +1,711 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+---
+name:            vadd_vv_nxv1i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv1i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv1i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv2i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv2i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv2i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv4i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv4i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv4i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv8i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv8i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv8i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv16i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vadd_vv_nxv16i8
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv16i8
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vadd_vv_nxv32i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vadd_vv_nxv32i8
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv32i8
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
+    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vadd_vv_nxv64i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vadd_vv_nxv64i8
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv64i8
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vadd_vv_nxv1i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv1i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv1i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv2i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv2i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv2i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv4i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv4i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv4i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv8i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vadd_vv_nxv8i16
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv8i16
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
+    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vadd_vv_nxv16i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vadd_vv_nxv16i16
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv16i16
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vadd_vv_nxv32i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vadd_vv_nxv32i16
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv32i16
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
+    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vadd_vv_nxv1i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv1i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv1i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv2i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv2i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv2i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv4i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vadd_vv_nxv4i32
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv4i32
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vadd_vv_nxv8i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vadd_vv_nxv8i32
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv8i32
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
+    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vadd_vv_nxv16i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vadd_vv_nxv16i32
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv16i32
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vadd_vv_nxv1i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vadd_vv_nxv1i64
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv1i64
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vadd_vv_nxv2i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vadd_vv_nxv2i64
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv2i64
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    %1:_(<vscale x 2 x s64>) = COPY $v10m2
+    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vadd_vv_nxv4i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vadd_vv_nxv4i64
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv4i64
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %1:_(<vscale x 4 x s64>) = COPY $v12m4
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vadd_vv_nxv8i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vadd_vv_nxv8i64
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vadd_vv_nxv8i64
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    %1:_(<vscale x 8 x s64>) = COPY $v16m8
+    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
new file mode 100644
index 0000000000000..d8580c09761ff
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
@@ -0,0 +1,711 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+---
+name:            vsub_vv_nxv1i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv1i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv1i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv2i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv2i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv2i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv4i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv4i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv4i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv8i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv8i8
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv8i8
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv16i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vsub_vv_nxv16i8
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv16i8
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
+    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vsub_vv_nxv32i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vsub_vv_nxv32i8
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv32i8
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vsub_vv_nxv64i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vsub_vv_nxv64i8
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv64i8
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
+    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vsub_vv_nxv1i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv1i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv1i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv2i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv2i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv2i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv4i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv4i16
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv4i16
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv8i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vsub_vv_nxv8i16
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv8i16
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vsub_vv_nxv16i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vsub_vv_nxv16i16
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv16i16
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
+    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vsub_vv_nxv32i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vsub_vv_nxv32i16
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv32i16
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vsub_vv_nxv1i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv1i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv1i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv2i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv2i32
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv2i32
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv4i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vsub_vv_nxv4i32
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv4i32
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
+    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vsub_vv_nxv8i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vsub_vv_nxv8i32
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv8i32
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vsub_vv_nxv16i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vsub_vv_nxv16i32
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv16i32
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
+    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vsub_vv_nxv1i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8, $v9
+
+    ; RV32I-LABEL: name: vsub_vv_nxv1i64
+    ; RV32I: liveins: $v8, $v9
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv1i64
+    ; RV64I: liveins: $v8, $v9
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vsub_vv_nxv2i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m2, $v10m2
+
+    ; RV32I-LABEL: name: vsub_vv_nxv2i64
+    ; RV32I: liveins: $v8m2, $v10m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv2i64
+    ; RV64I: liveins: $v8m2, $v10m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    %1:_(<vscale x 2 x s64>) = COPY $v10m2
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vsub_vv_nxv4i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m4, $v12m4
+
+    ; RV32I-LABEL: name: vsub_vv_nxv4i64
+    ; RV32I: liveins: $v8m4, $v12m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv4i64
+    ; RV64I: liveins: $v8m4, $v12m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %1:_(<vscale x 4 x s64>) = COPY $v12m4
+    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vsub_vv_nxv8i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8
+
+    ; RV32I-LABEL: name: vsub_vv_nxv8i64
+    ; RV32I: liveins: $v8m8, $v16m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vsub_vv_nxv8i64
+    ; RV64I: liveins: $v8m8, $v16m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    %1:_(<vscale x 8 x s64>) = COPY $v16m8
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir
deleted file mode 100644
index d45009a417297..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv32.mir
+++ /dev/null
@@ -1,511 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
-# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
-
----
-name:            add_nxv1s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv1s8
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s8>) = COPY $v10
-    %1:_(<vscale x 1 x s8>) = COPY $v11
-    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv2s8
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s8>) = COPY $v10
-    %1:_(<vscale x 2 x s8>) = COPY $v11
-    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv4s8
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s8>) = COPY $v10
-    %1:_(<vscale x 4 x s8>) = COPY $v11
-    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv8s8
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s8>) = COPY $v10
-    %1:_(<vscale x 8 x s8>) = COPY $v11
-    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv16s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv16s8
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 16 x s8>) = COPY $v10
-    %1:_(<vscale x 16 x s8>) = COPY $v11
-    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 16 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv32s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv32s8
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 32 x s8>) = COPY $v10
-    %1:_(<vscale x 32 x s8>) = COPY $v11
-    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 32 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv64s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv64s8
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 64 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 64 x s8>) = COPY $v10
-    %1:_(<vscale x 64 x s8>) = COPY $v11
-    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 64 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv1s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv1s16
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s16>) = COPY $v10
-    %1:_(<vscale x 1 x s16>) = COPY $v11
-    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv2s16
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s16>) = COPY $v10
-    %1:_(<vscale x 2 x s16>) = COPY $v11
-    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv4s16
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s16>) = COPY $v10
-    %1:_(<vscale x 4 x s16>) = COPY $v11
-    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv8s16
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s16>) = COPY $v10
-    %1:_(<vscale x 8 x s16>) = COPY $v11
-    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv16s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv16s16
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 16 x s16>) = COPY $v10
-    %1:_(<vscale x 16 x s16>) = COPY $v11
-    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 16 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv32s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv32s16
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 32 x s16>) = COPY $v10
-    %1:_(<vscale x 32 x s16>) = COPY $v11
-    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 32 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv1s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv1s32
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s32>) = COPY $v10
-    %1:_(<vscale x 1 x s32>) = COPY $v11
-    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv2s32
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s32>) = COPY $v10
-    %1:_(<vscale x 2 x s32>) = COPY $v11
-    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv4s32
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s32>) = COPY $v10
-    %1:_(<vscale x 4 x s32>) = COPY $v11
-    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv8s32
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s32>) = COPY $v10
-    %1:_(<vscale x 8 x s32>) = COPY $v11
-    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv16s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv16s32
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 16 x s32>) = COPY $v10
-    %1:_(<vscale x 16 x s32>) = COPY $v11
-    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 16 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv1s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv1s64
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s64>) = COPY $v10
-    %1:_(<vscale x 1 x s64>) = COPY $v11
-    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s64>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv2s64
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s64>) = COPY $v10
-    %1:_(<vscale x 2 x s64>) = COPY $v11
-    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s64>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: add_nxv4s64
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v11
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s64>) = COPY $v10
-    %1:_(<vscale x 4 x s64>) = COPY $v11
-    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s64>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV32I-LABEL: name: sub_nxv8s64
-    ; RV32I: liveins: $v10, $v11
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v11
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s64>) = COPY $v10
-    %1:_(<vscale x 8 x s64>) = COPY $v11
-    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s64>)
-    PseudoRET implicit $v10
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir
deleted file mode 100644
index 4233e0557ab43..0000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/vec-add-sub-rv64.mir
+++ /dev/null
@@ -1,511 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
-# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
-
----
-name:            add_nxv1s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv1s8
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s8>) = COPY $v10
-    %1:_(<vscale x 1 x s8>) = COPY $v11
-    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv2s8
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s8>) = COPY $v10
-    %1:_(<vscale x 2 x s8>) = COPY $v11
-    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv4s8
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s8>) = COPY $v10
-    %1:_(<vscale x 4 x s8>) = COPY $v11
-    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv8s8
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s8>) = COPY $v10
-    %1:_(<vscale x 8 x s8>) = COPY $v11
-    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv16s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv16s8
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 16 x s8>) = COPY $v10
-    %1:_(<vscale x 16 x s8>) = COPY $v11
-    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 16 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv32s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv32s8
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 32 x s8>) = COPY $v10
-    %1:_(<vscale x 32 x s8>) = COPY $v11
-    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 32 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv64s8
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv64s8
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 64 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 64 x s8>) = COPY $v10
-    %1:_(<vscale x 64 x s8>) = COPY $v11
-    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 64 x s8>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv1s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv1s16
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s16>) = COPY $v10
-    %1:_(<vscale x 1 x s16>) = COPY $v11
-    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv2s16
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s16>) = COPY $v10
-    %1:_(<vscale x 2 x s16>) = COPY $v11
-    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv4s16
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s16>) = COPY $v10
-    %1:_(<vscale x 4 x s16>) = COPY $v11
-    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv8s16
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s16>) = COPY $v10
-    %1:_(<vscale x 8 x s16>) = COPY $v11
-    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv16s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv16s16
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 16 x s16>) = COPY $v10
-    %1:_(<vscale x 16 x s16>) = COPY $v11
-    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 16 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv32s16
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv32s16
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 32 x s16>) = COPY $v10
-    %1:_(<vscale x 32 x s16>) = COPY $v11
-    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 32 x s16>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv1s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv1s32
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s32>) = COPY $v10
-    %1:_(<vscale x 1 x s32>) = COPY $v11
-    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv2s32
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s32>) = COPY $v10
-    %1:_(<vscale x 2 x s32>) = COPY $v11
-    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv4s32
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s32>) = COPY $v10
-    %1:_(<vscale x 4 x s32>) = COPY $v11
-    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv8s32
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s32>) = COPY $v10
-    %1:_(<vscale x 8 x s32>) = COPY $v11
-    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv16s32
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv16s32
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 16 x s32>) = COPY $v10
-    %1:_(<vscale x 16 x s32>) = COPY $v11
-    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 16 x s32>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv1s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv1s64
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 1 x s64>) = COPY $v10
-    %1:_(<vscale x 1 x s64>) = COPY $v11
-    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 1 x s64>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv2s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv2s64
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 2 x s64>) = COPY $v10
-    %1:_(<vscale x 2 x s64>) = COPY $v11
-    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 2 x s64>)
-    PseudoRET implicit $v10
-
-...
----
-name:            add_nxv4s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: add_nxv4s64
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v11
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[ADD]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 4 x s64>) = COPY $v10
-    %1:_(<vscale x 4 x s64>) = COPY $v11
-    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
-    $v10 = COPY %2(<vscale x 4 x s64>)
-    PseudoRET implicit $v10
-
-...
----
-name:            sub_nxv8s64
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0.entry:
-    liveins: $v10, $v11
-
-    ; RV64I-LABEL: name: sub_nxv8s64
-    ; RV64I: liveins: $v10, $v11
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v11
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v10 = COPY [[SUB]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v10
-    %0:_(<vscale x 8 x s64>) = COPY $v10
-    %1:_(<vscale x 8 x s64>) = COPY $v11
-    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
-    $v10 = COPY %2(<vscale x 8 x s64>)
-    PseudoRET implicit $v10
-
-...

>From 5d60e8d49dcb6445209270c0040943a750356f26 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 31 Jan 2024 14:35:05 -0500
Subject: [PATCH 12/12] Use clang-format off to keep PartMappings entries on single lines

---
 llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index a4c7fdf83a958..58c971aee2f4c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -25,6 +25,7 @@ namespace llvm {
 namespace RISCV {
 
 const RegisterBankInfo::PartialMapping PartMappings[] = {
+    // clang-format off
     {0, 32, GPRBRegBank},
     {0, 64, GPRBRegBank},
     {0, 32, FPRBRegBank},
@@ -33,6 +34,7 @@ const RegisterBankInfo::PartialMapping PartMappings[] = {
     {0, 128, VRBRegBank},
     {0, 256, VRBRegBank},
     {0, 512, VRBRegBank},
+    // clang-format on
 };
 
 enum PartialMappingIdx {


