[llvm] 10c2d5f - [RISCV][GISel] RegBank select and instruction select for vector G_ADD, G_SUB (#74114)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 1 12:06:48 PST 2024
Author: Jiahan Xie
Date: 2024-02-01T15:06:43-05:00
New Revision: 10c2d5ff7c6de8096c8f4c4621612970940f6dd3
URL: https://github.com/llvm/llvm-project/commit/10c2d5ff7c6de8096c8f4c4621612970940f6dd3
DIFF: https://github.com/llvm/llvm-project/commit/10c2d5ff7c6de8096c8f4c4621612970940f6dd3.diff
LOG: [RISCV][GISel] RegBank select and instruction select for vector G_ADD, G_SUB (#74114)
Implement RegisterBank selection for scalable vector G_ADD and G_SUB by
adding new partial mappings for the vector register bank, one per supported
scalable vector size. Then implement instruction selection for the same
operations by choosing the correct RISC-V vector register class for the type.
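A minimal sketch, not part of the patch itself, of the size-to-class mapping
this implements: the known-minimum size in bits of the scalable vector type
selects the LMUL register class (assuming anything up to 64 known-minimum
bits fits a single vector register; the names below are illustrative only).

  // Hypothetical helper illustrating the mapping used by the patch.
  static const char *vrClassForKnownMinBits(unsigned KnownMinBits) {
    if (KnownMinBits <= 64)
      return "VR";   // fractional LMUL (MF8/MF4/MF2) and LMUL=1
    if (KnownMinBits == 128)
      return "VRM2"; // LMUL=2
    if (KnownMinBits == 256)
      return "VRM4"; // LMUL=4
    if (KnownMinBits == 512)
      return "VRM8"; // LMUL=8
    return nullptr;  // unsupported size
  }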
Added:
llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir
llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir
llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
Modified:
llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 30b2430249d23..4cb1d01f3e8ca 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -281,7 +281,8 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
}
const LLT Ty = MRI.getType(VReg);
- if (Ty.isValid() && Ty.getSizeInBits() > TRI.getRegSizeInBits(*RC)) {
+ if (Ty.isValid() &&
+ TypeSize::isKnownGT(Ty.getSizeInBits(), TRI.getRegSizeInBits(*RC))) {
reportGISelFailure(
MF, TPC, MORE, "gisel-select",
"VReg's low-level type and register class have
diff erent sizes", *MI);
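The verifier check above now compares TypeSize values instead of raw
integers, since a scalable vector's size is only known as a multiple of
vscale. A minimal sketch of the assumed semantics of the isKnownGT
predicate from llvm/Support/TypeSize.h:

  TypeSize Scalable = TypeSize::getScalable(64); // vscale x 64 bits
  TypeSize Fixed    = TypeSize::getFixed(128);   // exactly 128 bits
  // Not provably greater for every vscale (vscale=1 gives 64 < 128), so false:
  bool Gt = TypeSize::isKnownGT(Scalable, Fixed);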
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 65a0a3e0929fc..5738f86e7e9ff 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -844,7 +844,20 @@ const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
return &RISCV::FPR64RegClass;
}
- // TODO: Non-GPR register classes.
+ if (RB.getID() == RISCV::VRBRegBankID) {
+ if (Ty.getSizeInBits().getKnownMinValue() <= 64)
+ return &RISCV::VRRegClass;
+
+ if (Ty.getSizeInBits().getKnownMinValue() == 128)
+ return &RISCV::VRM2RegClass;
+
+ if (Ty.getSizeInBits().getKnownMinValue() == 256)
+ return &RISCV::VRM4RegClass;
+
+ if (Ty.getSizeInBits().getKnownMinValue() == 512)
+ return &RISCV::VRM8RegClass;
+ }
+
return nullptr;
}
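For reference, a sketch of how some of the scalable types exercised by the
new tests land in these classes, assuming LLT::scalable_vector(MinNumElts,
ScalarSizeInBits) reports MinNumElts * ScalarSizeInBits known-minimum bits:

  LLT NxV1S8  = LLT::scalable_vector(1, 8);   //   8 known-min bits -> VRRegClass
  LLT NxV8S8  = LLT::scalable_vector(8, 8);   //  64 known-min bits -> VRRegClass
  LLT NxV16S8 = LLT::scalable_vector(16, 8);  // 128 known-min bits -> VRM2RegClass
  LLT NxV8S64 = LLT::scalable_vector(8, 64);  // 512 known-min bits -> VRM8RegClass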
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index cf0ff63a5e51c..58c971aee2f4c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -25,10 +25,16 @@ namespace llvm {
namespace RISCV {
const RegisterBankInfo::PartialMapping PartMappings[] = {
+ // clang-format off
{0, 32, GPRBRegBank},
{0, 64, GPRBRegBank},
{0, 32, FPRBRegBank},
{0, 64, FPRBRegBank},
+ {0, 64, VRBRegBank},
+ {0, 128, VRBRegBank},
+ {0, 256, VRBRegBank},
+ {0, 512, VRBRegBank},
+ // clang-format on
};
enum PartialMappingIdx {
@@ -36,6 +42,10 @@ enum PartialMappingIdx {
PMI_GPRB64 = 1,
PMI_FPRB32 = 2,
PMI_FPRB64 = 3,
+ PMI_VRB64 = 4,
+ PMI_VRB128 = 5,
+ PMI_VRB256 = 6,
+ PMI_VRB512 = 7,
};
const RegisterBankInfo::ValueMapping ValueMappings[] = {
@@ -57,6 +67,22 @@ const RegisterBankInfo::ValueMapping ValueMappings[] = {
{&PartMappings[PMI_FPRB64], 1},
{&PartMappings[PMI_FPRB64], 1},
{&PartMappings[PMI_FPRB64], 1},
+ // Maximum 3 VR LMUL={1, MF2, MF4, MF8} operands.
+ {&PartMappings[PMI_VRB64], 1},
+ {&PartMappings[PMI_VRB64], 1},
+ {&PartMappings[PMI_VRB64], 1},
+ // Maximum 3 VR LMUL=2 operands.
+ {&PartMappings[PMI_VRB128], 1},
+ {&PartMappings[PMI_VRB128], 1},
+ {&PartMappings[PMI_VRB128], 1},
+ // Maximum 3 VR LMUL=4 operands.
+ {&PartMappings[PMI_VRB256], 1},
+ {&PartMappings[PMI_VRB256], 1},
+ {&PartMappings[PMI_VRB256], 1},
+ // Maximum 3 VR LMUL=8 operands.
+ {&PartMappings[PMI_VRB512], 1},
+ {&PartMappings[PMI_VRB512], 1},
+ {&PartMappings[PMI_VRB512], 1},
};
enum ValueMappingIdx {
@@ -65,6 +91,10 @@ enum ValueMappingIdx {
GPRB64Idx = 4,
FPRB32Idx = 7,
FPRB64Idx = 10,
+ VRB64Idx = 13,
+ VRB128Idx = 16,
+ VRB256Idx = 19,
+ VRB512Idx = 22,
};
} // namespace RISCV
} // namespace llvm
@@ -215,6 +245,23 @@ bool RISCVRegisterBankInfo::anyUseOnlyUseFP(
[&](const MachineInstr &UseMI) { return onlyUsesFP(UseMI, MRI, TRI); });
}
+static const RegisterBankInfo::ValueMapping *getVRBValueMapping(unsigned Size) {
+ unsigned Idx;
+
+ if (Size <= 64)
+ Idx = RISCV::VRB64Idx;
+ else if (Size == 128)
+ Idx = RISCV::VRB128Idx;
+ else if (Size == 256)
+ Idx = RISCV::VRB256Idx;
+ else if (Size == 512)
+ Idx = RISCV::VRB512Idx;
+ else
+ llvm::report_fatal_error("Invalid Size");
+
+ return &RISCV::ValueMappings[Idx];
+}
+
const RegisterBankInfo::InstructionMapping &
RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
@@ -242,7 +289,16 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
switch (Opc) {
case TargetOpcode::G_ADD:
- case TargetOpcode::G_SUB:
+ case TargetOpcode::G_SUB: {
+ if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ return getInstructionMapping(
+ DefaultMappingID, /*Cost=*/1,
+ getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue()),
+ NumOperands);
+ }
+ }
+ LLVM_FALLTHROUGH;
case TargetOpcode::G_SHL:
case TargetOpcode::G_ASHR:
case TargetOpcode::G_LSHR:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir
new file mode 100644
index 0000000000000..5b493f6844c01
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add.mir
@@ -0,0 +1,774 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+---
+name: test_nxv1i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = COPY $v8
+ %1:vrb(<vscale x 1 x s8>) = COPY $v9
+ %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv2i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv2i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s8>) = COPY $v8
+ %1:vrb(<vscale x 2 x s8>) = COPY $v9
+ %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv4i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv4i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s8>) = COPY $v8
+ %1:vrb(<vscale x 4 x s8>) = COPY $v9
+ %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv8i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv8i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s8>) = COPY $v8
+ %1:vrb(<vscale x 8 x s8>) = COPY $v9
+ %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv16i8
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv16i8
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ %1:vrb(<vscale x 16 x s8>) = COPY $v10m2
+ %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv32i8
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv32i8
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ %1:vrb(<vscale x 32 x s8>) = COPY $v12m4
+ %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv64i8
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv64i8
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ %1:vrb(<vscale x 64 x s8>) = COPY $v16m8
+ %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s16>) = COPY $v8
+ %1:vrb(<vscale x 1 x s16>) = COPY $v9
+ %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv2i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv2i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s16>) = COPY $v8
+ %1:vrb(<vscale x 2 x s16>) = COPY $v9
+ %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv4i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv4i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s16>) = COPY $v8
+ %1:vrb(<vscale x 4 x s16>) = COPY $v9
+ %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv8i16
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv8i16
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ %1:vrb(<vscale x 8 x s16>) = COPY $v10m2
+ %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv16i16
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv16i16
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ %1:vrb(<vscale x 16 x s16>) = COPY $v12m4
+ %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv32i16
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv32i16
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ %1:vrb(<vscale x 32 x s16>) = COPY $v16m8
+ %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s32>) = COPY $v8
+ %1:vrb(<vscale x 1 x s32>) = COPY $v9
+ %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv2i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv2i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s32>) = COPY $v8
+ %1:vrb(<vscale x 2 x s32>) = COPY $v9
+ %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv4i32
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv4i32
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ %1:vrb(<vscale x 4 x s32>) = COPY $v10m2
+ %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv8i32
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv8i32
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ %1:vrb(<vscale x 8 x s32>) = COPY $v12m4
+ %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv16i32
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv16i32
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ %1:vrb(<vscale x 16 x s32>) = COPY $v16m8
+ %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i64
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i64
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s64>) = COPY $v8
+ %1:vrb(<vscale x 1 x s64>) = COPY $v9
+ %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv2i64
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv2i64
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ %1:vrb(<vscale x 2 x s64>) = COPY $v10m2
+ %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv4i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv4i64
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv4i64
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ %1:vrb(<vscale x 4 x s64>) = COPY $v12m4
+ %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv8i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv8i64
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv8i64
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ %1:vrb(<vscale x 8 x s64>) = COPY $v16m8
+ %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir
new file mode 100644
index 0000000000000..9f35ba9ef6c07
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub.mir
@@ -0,0 +1,774 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+---
+name: test_nxv1i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = COPY $v8
+ %1:vrb(<vscale x 1 x s8>) = COPY $v9
+ %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv2i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv2i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s8>) = COPY $v8
+ %1:vrb(<vscale x 2 x s8>) = COPY $v9
+ %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv4i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv4i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s8>) = COPY $v8
+ %1:vrb(<vscale x 4 x s8>) = COPY $v9
+ %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv8i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv8i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s8>) = COPY $v8
+ %1:vrb(<vscale x 8 x s8>) = COPY $v9
+ %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv16i8
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv16i8
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ %1:vrb(<vscale x 16 x s8>) = COPY $v10m2
+ %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv32i8
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv32i8
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ %1:vrb(<vscale x 32 x s8>) = COPY $v12m4
+ %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv64i8
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv64i8
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ %1:vrb(<vscale x 64 x s8>) = COPY $v16m8
+ %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s16>) = COPY $v8
+ %1:vrb(<vscale x 1 x s16>) = COPY $v9
+ %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv2i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv2i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s16>) = COPY $v8
+ %1:vrb(<vscale x 2 x s16>) = COPY $v9
+ %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv4i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv4i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s16>) = COPY $v8
+ %1:vrb(<vscale x 4 x s16>) = COPY $v9
+ %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv8i16
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv8i16
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ %1:vrb(<vscale x 8 x s16>) = COPY $v10m2
+ %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv16i16
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv16i16
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ %1:vrb(<vscale x 16 x s16>) = COPY $v12m4
+ %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv32i16
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv32i16
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ %1:vrb(<vscale x 32 x s16>) = COPY $v16m8
+ %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s32>) = COPY $v8
+ %1:vrb(<vscale x 1 x s32>) = COPY $v9
+ %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv2i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv2i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s32>) = COPY $v8
+ %1:vrb(<vscale x 2 x s32>) = COPY $v9
+ %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv4i32
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv4i32
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ %1:vrb(<vscale x 4 x s32>) = COPY $v10m2
+ %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv8i32
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv8i32
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ %1:vrb(<vscale x 8 x s32>) = COPY $v12m4
+ %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv16i32
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv16i32
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ %1:vrb(<vscale x 16 x s32>) = COPY $v16m8
+ %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: test_nxv1i64
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: test_nxv1i64
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s64>) = COPY $v8
+ %1:vrb(<vscale x 1 x s64>) = COPY $v9
+ %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: test_nxv2i64
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: test_nxv2i64
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ %1:vrb(<vscale x 2 x s64>) = COPY $v10m2
+ %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv4i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: test_nxv4i64
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: test_nxv4i64
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ %1:vrb(<vscale x 4 x s64>) = COPY $v12m4
+ %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv8i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: test_nxv8i64
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: test_nxv8i64
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ %1:vrb(<vscale x 8 x s64>) = COPY $v16m8
+ %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
new file mode 100644
index 0000000000000..049060b79bf6f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
@@ -0,0 +1,711 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+---
+name: vadd_vv_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv1i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv1i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv2i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv2i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv4i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv4i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv8i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv8i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vadd_vv_nxv16i8
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv16i8
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vadd_vv_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vadd_vv_nxv32i8
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv32i8
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vadd_vv_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vadd_vv_nxv64i8
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv64i8
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vadd_vv_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv1i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv1i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv2i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv2i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv4i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv4i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vadd_vv_nxv8i16
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv8i16
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vadd_vv_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vadd_vv_nxv16i16
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv16i16
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vadd_vv_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vadd_vv_nxv32i16
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv32i16
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vadd_vv_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv1i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv1i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv2i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv2i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vadd_vv_nxv4i32
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv4i32
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vadd_vv_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vadd_vv_nxv8i32
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv8i32
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vadd_vv_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vadd_vv_nxv16i32
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv16i32
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vadd_vv_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vadd_vv_nxv1i64
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv1i64
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vadd_vv_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vadd_vv_nxv2i64
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv2i64
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = COPY $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v10m2
+ %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vadd_vv_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vadd_vv_nxv4i64
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv4i64
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(<vscale x 4 x s64>) = COPY $v12m4
+ %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vadd_vv_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vadd_vv_nxv8i64
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vadd_vv_nxv8i64
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(<vscale x 8 x s64>) = COPY $v16m8
+ %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
new file mode 100644
index 0000000000000..d8580c09761ff
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
@@ -0,0 +1,711 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+---
+name: vsub_vv_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv1i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv1i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv2i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv2i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv4i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv4i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv8i8
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv8i8
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vsub_vv_nxv16i8
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv16i8
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vsub_vv_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vsub_vv_nxv32i8
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv32i8
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vsub_vv_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vsub_vv_nxv64i8
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv64i8
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vsub_vv_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv1i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv1i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv2i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv2i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv4i16
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv4i16
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vsub_vv_nxv8i16
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv8i16
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vsub_vv_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vsub_vv_nxv16i16
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv16i16
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vsub_vv_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vsub_vv_nxv32i16
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv32i16
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vsub_vv_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv1i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv1i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv2i32
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv2i32
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vsub_vv_nxv4i32
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv4i32
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vsub_vv_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vsub_vv_nxv8i32
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv8i32
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vsub_vv_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vsub_vv_nxv16i32
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv16i32
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vsub_vv_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8, $v9
+
+ ; RV32I-LABEL: name: vsub_vv_nxv1i64
+ ; RV32I: liveins: $v8, $v9
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv1i64
+ ; RV64I: liveins: $v8, $v9
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vsub_vv_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $v10m2
+
+ ; RV32I-LABEL: name: vsub_vv_nxv2i64
+ ; RV32I: liveins: $v8m2, $v10m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv2i64
+ ; RV64I: liveins: $v8m2, $v10m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = COPY $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v10m2
+ %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vsub_vv_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $v12m4
+
+ ; RV32I-LABEL: name: vsub_vv_nxv4i64
+ ; RV32I: liveins: $v8m4, $v12m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv4i64
+ ; RV64I: liveins: $v8m4, $v12m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(<vscale x 4 x s64>) = COPY $v12m4
+ %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vsub_vv_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8
+
+ ; RV32I-LABEL: name: vsub_vv_nxv8i64
+ ; RV32I: liveins: $v8m8, $v16m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vsub_vv_nxv8i64
+ ; RV64I: liveins: $v8m8, $v16m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(<vscale x 8 x s64>) = COPY $v16m8
+ %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...