[llvm] [RISCV][GISel] Instruction select for vector G_ADD, G_SUB (PR #74114)
Jiahan Xie via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 8 05:51:31 PST 2023
https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/74114
From d58b0da4922361dfd223f361df0212e0f3ea68e2 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 5 Dec 2023 19:54:00 -0500
Subject: [PATCH 1/3] use TypeSize::isKnownGT to compare LLT and TargetRegisterInfo register sizes
---
llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index baea773cf528e..814bfc25394c7 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -277,7 +277,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
}
const LLT Ty = MRI.getType(VReg);
- if (Ty.isValid() && Ty.getSizeInBits() > TRI.getRegSizeInBits(*RC)) {
+ if (Ty.isValid() && TypeSize::isKnownGT(Ty.getSizeInBits(), TRI.getRegSizeInBits(*RC))) {
reportGISelFailure(
MF, TPC, MORE, "gisel-select",
"VReg's low-level type and register class have different sizes", *MI);
From 194d2109bfad4f3318ddf61d3542c38977912356 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 08:23:48 -0500
Subject: [PATCH 2/3] implement getRegClassForTypeOnBank for the vector register bank
---
.../RISCV/GISel/RISCVInstructionSelector.cpp | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 61bdbfc47d947..667bf2159c129 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -785,6 +785,22 @@ const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
}
// TODO: Non-GPR register classes.
+
+ if (RB.getID() == RISCV::VRBRegBankID) {
+ if (Ty.getSizeInBits().getKnownMinValue() <= 64) {
+ return &RISCV::VRRegClass;
+ }
+ if (Ty.getSizeInBits().getKnownMinValue() == 128) {
+ return &RISCV::VRM2RegClass;
+ }
+ if (Ty.getSizeInBits().getKnownMinValue() == 256) {
+ return &RISCV::VRM4RegClass;
+ }
+ if (Ty.getSizeInBits().getKnownMinValue() == 512) {
+ return &RISCV::VRM8RegClass;
+ }
+ }
+
return nullptr;
}
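
(Reader's gloss, not the author's wording: a scalable type's known-minimum size on RISC-V comes in multiples of RISCV::RVVBitsPerBlock, i.e. 64 bits, so the if-chain above is effectively a known-min-bits -> LMUL -> register-group mapping. A hypothetical standalone restatement:

    // Hedged restatement of the hunk above; the helper name is made up.
    static const TargetRegisterClass *vrClassForKnownMinBits(unsigned KnownMin) {
      // KnownMin / 64 is the register-group size (LMUL):
      // <= 1 group -> VR, 2 -> VRM2, 4 -> VRM4, 8 -> VRM8.
      if (KnownMin <= 64)
        return &RISCV::VRRegClass;
      if (KnownMin == 128)
        return &RISCV::VRM2RegClass;
      if (KnownMin == 256)
        return &RISCV::VRM4RegClass;
      if (KnownMin == 512)
        return &RISCV::VRM8RegClass;
      return nullptr; // other sizes are not legal RVV types here
    }

For example, <vscale x 16 x s8> has a 128-bit known minimum and lands in VRM2, which is exactly what the vrm2 checks in the tests below expect.)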
From d797bfab89533794e96306fafd8cbe49d5296353 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 08:43:27 -0500
Subject: [PATCH 3/3] vector instruction selection tests for G_ADD and G_SUB
---
.../instruction-select/rvv/add-rv32.mir | 556 ++++++++++++++++++
.../instruction-select/rvv/add-rv64.mir | 556 ++++++++++++++++++
.../instruction-select/rvv/sub-rv32.mir | 556 ++++++++++++++++++
.../instruction-select/rvv/sub-rv64.mir | 556 ++++++++++++++++++
4 files changed, 2224 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
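
(A decoding note for the check lines below, based on the autogenerated /* */ comments: each pseudo has the shape

    PseudoVADD_VV_<LMUL> <passthru>, <src1>, <src2>, <AVL>, <SEW>, <policy>

where AVL = -1 is the sentinel for VLMAX, SEW is the log2 of the element width in bits (3 = e8, 4 = e16, 5 = e32, 6 = e64), and policy = 3 encodes tail-agnostic, mask-agnostic ("ta, ma").)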
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
new file mode 100644
index 0000000000000..a21883f966d4c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv32.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name: add_nxv1s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv1s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s8>) = COPY $x10
+ %1:vrb(<vscale x 1 x s8>) = COPY $x11
+ %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv2s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s8>) = COPY $x10
+ %1:vrb(<vscale x 2 x s8>) = COPY $x11
+ %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv4s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s8>) = COPY $x10
+ %1:vrb(<vscale x 4 x s8>) = COPY $x11
+ %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv8s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s8>) = COPY $x10
+ %1:vrb(<vscale x 8 x s8>) = COPY $x11
+ %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv16s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv16s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s8>) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $x11
+ %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv32s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv32s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s8>) = COPY $x10
+ %1:vrb(<vscale x 32 x s8>) = COPY $x11
+ %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv64s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv64s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 64 x s8>) = COPY $x10
+ %1:vrb(<vscale x 64 x s8>) = COPY $x11
+ %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv1s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv1s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s16>) = COPY $x10
+ %1:vrb(<vscale x 1 x s16>) = COPY $x11
+ %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv2s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s16>) = COPY $x10
+ %1:vrb(<vscale x 2 x s16>) = COPY $x11
+ %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv4s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s16>) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $x11
+ %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv8s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s16>) = COPY $x10
+ %1:vrb(<vscale x 8 x s16>) = COPY $x11
+ %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv16s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv16s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s16>) = COPY $x10
+ %1:vrb(<vscale x 16 x s16>) = COPY $x11
+ %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv32s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv32s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s16>) = COPY $x10
+ %1:vrb(<vscale x 32 x s16>) = COPY $x11
+ %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv1s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv1s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s32>) = COPY $x10
+ %1:vrb(<vscale x 1 x s32>) = COPY $x11
+ %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv2s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s32>) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $x11
+ %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv4s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s32>) = COPY $x10
+ %1:vrb(<vscale x 4 x s32>) = COPY $x11
+ %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv8s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s32>) = COPY $x10
+ %1:vrb(<vscale x 8 x s32>) = COPY $x11
+ %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv16s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv16s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s32>) = COPY $x10
+ %1:vrb(<vscale x 16 x s32>) = COPY $x11
+ %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv1s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv1s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s64>) = COPY $x10
+ %1:vrb(<vscale x 1 x s64>) = COPY $x11
+ %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv2s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s64>) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $x11
+ %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv4s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s64>) = COPY $x10
+ %1:vrb(<vscale x 4 x s64>) = COPY $x11
+ %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: add_nxv8s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s64>) = COPY $x10
+ %1:vrb(<vscale x 8 x s64>) = COPY $x11
+ %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
new file mode 100644
index 0000000000000..572d24ee4c7be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/add-rv64.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name: add_nxv1s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv1s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF8_:%[0-9]+]]:vr = PseudoVADD_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s8>) = COPY $x10
+ %1:vrb(<vscale x 1 x s8>) = COPY $x11
+ %2:vrb(<vscale x 1 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv2s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s8>) = COPY $x10
+ %1:vrb(<vscale x 2 x s8>) = COPY $x11
+ %2:vrb(<vscale x 2 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv4s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s8>) = COPY $x10
+ %1:vrb(<vscale x 4 x s8>) = COPY $x11
+ %2:vrb(<vscale x 4 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv8s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s8>) = COPY $x10
+ %1:vrb(<vscale x 8 x s8>) = COPY $x11
+ %2:vrb(<vscale x 8 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv16s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv16s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s8>) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $x11
+ %2:vrb(<vscale x 16 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv32s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv32s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s8>) = COPY $x10
+ %1:vrb(<vscale x 32 x s8>) = COPY $x11
+ %2:vrb(<vscale x 32 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv64s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv64s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 64 x s8>) = COPY $x10
+ %1:vrb(<vscale x 64 x s8>) = COPY $x11
+ %2:vrb(<vscale x 64 x s8>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv1s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv1s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF4_:%[0-9]+]]:vr = PseudoVADD_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s16>) = COPY $x10
+ %1:vrb(<vscale x 1 x s16>) = COPY $x11
+ %2:vrb(<vscale x 1 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv2s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s16>) = COPY $x10
+ %1:vrb(<vscale x 2 x s16>) = COPY $x11
+ %2:vrb(<vscale x 2 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv4s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s16>) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $x11
+ %2:vrb(<vscale x 4 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv8s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s16>) = COPY $x10
+ %1:vrb(<vscale x 8 x s16>) = COPY $x11
+ %2:vrb(<vscale x 8 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv16s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv16s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s16>) = COPY $x10
+ %1:vrb(<vscale x 16 x s16>) = COPY $x11
+ %2:vrb(<vscale x 16 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv32s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv32s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s16>) = COPY $x10
+ %1:vrb(<vscale x 32 x s16>) = COPY $x11
+ %2:vrb(<vscale x 32 x s16>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv1s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv1s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s32>) = COPY $x10
+ %1:vrb(<vscale x 1 x s32>) = COPY $x11
+ %2:vrb(<vscale x 1 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv2s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s32>) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $x11
+ %2:vrb(<vscale x 2 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv4s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s32>) = COPY $x10
+ %1:vrb(<vscale x 4 x s32>) = COPY $x11
+ %2:vrb(<vscale x 4 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv8s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s32>) = COPY $x10
+ %1:vrb(<vscale x 8 x s32>) = COPY $x11
+ %2:vrb(<vscale x 8 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv16s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv16s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s32>) = COPY $x10
+ %1:vrb(<vscale x 16 x s32>) = COPY $x11
+ %2:vrb(<vscale x 16 x s32>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv1s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv1s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s64>) = COPY $x10
+ %1:vrb(<vscale x 1 x s64>) = COPY $x11
+ %2:vrb(<vscale x 1 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv2s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv2s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M2_:%[0-9]+]]:vrm2 = PseudoVADD_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s64>) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $x11
+ %2:vrb(<vscale x 2 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv4s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv4s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M4_:%[0-9]+]]:vrm4 = PseudoVADD_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s64>) = COPY $x10
+ %1:vrb(<vscale x 4 x s64>) = COPY $x11
+ %2:vrb(<vscale x 4 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: add_nxv8s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: add_nxv8s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVADD_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s64>) = COPY $x10
+ %1:vrb(<vscale x 8 x s64>) = COPY $x11
+ %2:vrb(<vscale x 8 x s64>) = G_ADD %0, %1
+ $x10 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
new file mode 100644
index 0000000000000..b08361138c77d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv32.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name: sub_nxv1s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv1s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s8>) = COPY $x10
+ %1:vrb(<vscale x 1 x s8>) = COPY $x11
+ %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv2s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s8>) = COPY $x10
+ %1:vrb(<vscale x 2 x s8>) = COPY $x11
+ %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv4s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s8>) = COPY $x10
+ %1:vrb(<vscale x 4 x s8>) = COPY $x11
+ %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv8s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s8>) = COPY $x10
+ %1:vrb(<vscale x 8 x s8>) = COPY $x11
+ %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv16s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv16s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s8>) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $x11
+ %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv32s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv32s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s8>) = COPY $x10
+ %1:vrb(<vscale x 32 x s8>) = COPY $x11
+ %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv64s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv64s8
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 64 x s8>) = COPY $x10
+ %1:vrb(<vscale x 64 x s8>) = COPY $x11
+ %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv1s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv1s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s16>) = COPY $x10
+ %1:vrb(<vscale x 1 x s16>) = COPY $x11
+ %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv2s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s16>) = COPY $x10
+ %1:vrb(<vscale x 2 x s16>) = COPY $x11
+ %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv4s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s16>) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $x11
+ %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv8s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s16>) = COPY $x10
+ %1:vrb(<vscale x 8 x s16>) = COPY $x11
+ %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv16s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv16s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s16>) = COPY $x10
+ %1:vrb(<vscale x 16 x s16>) = COPY $x11
+ %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv32s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv32s16
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s16>) = COPY $x10
+ %1:vrb(<vscale x 32 x s16>) = COPY $x11
+ %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv1s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv1s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s32>) = COPY $x10
+ %1:vrb(<vscale x 1 x s32>) = COPY $x11
+ %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv2s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s32>) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $x11
+ %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv4s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s32>) = COPY $x10
+ %1:vrb(<vscale x 4 x s32>) = COPY $x11
+ %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv8s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s32>) = COPY $x10
+ %1:vrb(<vscale x 8 x s32>) = COPY $x11
+ %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv16s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv16s32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s32>) = COPY $x10
+ %1:vrb(<vscale x 16 x s32>) = COPY $x11
+ %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv1s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv1s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s64>) = COPY $x10
+ %1:vrb(<vscale x 1 x s64>) = COPY $x11
+ %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv2s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s64>) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $x11
+ %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv4s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s64>) = COPY $x10
+ %1:vrb(<vscale x 4 x s64>) = COPY $x11
+ %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: sub_nxv8s64
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s64>) = COPY $x10
+ %1:vrb(<vscale x 8 x s64>) = COPY $x11
+ %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
new file mode 100644
index 0000000000000..71150f49a4f08
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sub-rv64.mir
@@ -0,0 +1,556 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name: sub_nxv1s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv1s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s8>) = COPY $x10
+ %1:vrb(<vscale x 1 x s8>) = COPY $x11
+ %2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv2s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s8>) = COPY $x10
+ %1:vrb(<vscale x 2 x s8>) = COPY $x11
+ %2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv4s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s8>) = COPY $x10
+ %1:vrb(<vscale x 4 x s8>) = COPY $x11
+ %2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv8s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s8>) = COPY $x10
+ %1:vrb(<vscale x 8 x s8>) = COPY $x11
+ %2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv16s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv16s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s8>) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $x11
+ %2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv32s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv32s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s8>) = COPY $x10
+ %1:vrb(<vscale x 32 x s8>) = COPY $x11
+ %2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv64s8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv64s8
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 64 x s8>) = COPY $x10
+ %1:vrb(<vscale x 64 x s8>) = COPY $x11
+ %2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv1s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv1s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s16>) = COPY $x10
+ %1:vrb(<vscale x 1 x s16>) = COPY $x11
+ %2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv2s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s16>) = COPY $x10
+ %1:vrb(<vscale x 2 x s16>) = COPY $x11
+ %2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv4s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s16>) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $x11
+ %2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv8s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s16>) = COPY $x10
+ %1:vrb(<vscale x 8 x s16>) = COPY $x11
+ %2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv16s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv16s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s16>) = COPY $x10
+ %1:vrb(<vscale x 16 x s16>) = COPY $x11
+ %2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv32s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv32s16
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 32 x s16>) = COPY $x10
+ %1:vrb(<vscale x 32 x s16>) = COPY $x11
+ %2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv1s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv1s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s32>) = COPY $x10
+ %1:vrb(<vscale x 1 x s32>) = COPY $x11
+ %2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv2s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s32>) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $x11
+ %2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv4s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s32>) = COPY $x10
+ %1:vrb(<vscale x 4 x s32>) = COPY $x11
+ %2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv8s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s32>) = COPY $x10
+ %1:vrb(<vscale x 8 x s32>) = COPY $x11
+ %2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv16s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv16s32
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 16 x s32>) = COPY $x10
+ %1:vrb(<vscale x 16 x s32>) = COPY $x11
+ %2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv1s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv1s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 1 x s64>) = COPY $x10
+ %1:vrb(<vscale x 1 x s64>) = COPY $x11
+ %2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv2s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv2s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 2 x s64>) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $x11
+ %2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv4s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv4s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 4 x s64>) = COPY $x10
+ %1:vrb(<vscale x 4 x s64>) = COPY $x11
+ %2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+name: sub_nxv8s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: sub_nxv8s64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $x11
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $x10 = COPY [[PseudoVSUB_VV_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ %0:vrb(<vscale x 8 x s64>) = COPY $x10
+ %1:vrb(<vscale x 8 x s64>) = COPY $x11
+ %2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
+ $x10 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $x10
+
+...
+---
+
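For reference, the CHECK lines in these tests follow one mechanical pattern: for a <vscale x N x sEW> operand, the LMUL suffix of the selected pseudo is determined by the type's known-minimum size N*EW in bits (MF8 for 8 bits up through M8 for 512), and the SEW immediate printed beside it is log2(EW), i.e. 3 /* e8 */ through 6 /* e64 */. Below is a minimal standalone sketch of that naming scheme, derived only from the tests above; vsubPseudoFor is an illustrative helper, not an LLVM API.

// Minimal sketch (illustrative, not LLVM code): reconstructs the
// PseudoVSUB_VV_* opcode name the CHECK lines above expect for a
// G_SUB on <vscale x N x sEW>. The LMUL suffix tracks the type's
// known-minimum size in bits; the SEW operand in the MIR is log2(EW).
#include <cassert>
#include <string>

std::string vsubPseudoFor(unsigned N, unsigned EW) {
  assert((EW == 8 || EW == 16 || EW == 32 || EW == 64) &&
         "element width must be e8..e64");
  unsigned KnownMinBits = N * EW; // known-minimum size of the scalable type
  assert(KnownMinBits && (KnownMinBits & (KnownMinBits - 1)) == 0 &&
         KnownMinBits <= 512 &&
         "sizes covered by these tests are powers of two up to 512");
  const char *LMul = KnownMinBits == 8     ? "MF8"
                     : KnownMinBits == 16  ? "MF4"
                     : KnownMinBits == 32  ? "MF2"
                     : KnownMinBits == 64  ? "M1"
                     : KnownMinBits == 128 ? "M2"
                     : KnownMinBits == 256 ? "M4"
                                           : "M8"; // 512 bits
  return std::string("PseudoVSUB_VV_") + LMul;
}

// Example: vsubPseudoFor(16, 32) == "PseudoVSUB_VV_M8", matching the
// sub_nxv16s32 test; vsubPseudoFor(1, 64) == "PseudoVSUB_VV_M1",
// matching sub_nxv1s64.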