[llvm] e42a70a - [RISCV][GISel] IRTranslate and Legalize some instructions with scalable vector type
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 9 20:51:48 PST 2024
Author: jiahanxie353
Date: 2024-01-09T21:51:30-07:00
New Revision: e42a70afab47a7a9e76a40bb553eee458a5f18ae
URL: https://github.com/llvm/llvm-project/commit/e42a70afab47a7a9e76a40bb553eee458a5f18ae
DIFF: https://github.com/llvm/llvm-project/commit/e42a70afab47a7a9e76a40bb553eee458a5f18ae.diff
LOG: [RISCV][GISel] IRTranslate and Legalize some instructions with scalable vector type
* Add IRTranslate tests for ADD, SUB, AND, OR, and XOR with scalable
vector types to show that they work as expected.
* Legalize G_ADD, G_SUB, G_AND, G_OR, and G_XOR of scalable vector
type for the RISC-V vector extension.
Added:
llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
Modified:
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index ab8070772fe543..ae02e86baf6e8b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -47,10 +47,50 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
+ const LLT nxv1s8 = LLT::scalable_vector(1, s8);
+ const LLT nxv2s8 = LLT::scalable_vector(2, s8);
+ const LLT nxv4s8 = LLT::scalable_vector(4, s8);
+ const LLT nxv8s8 = LLT::scalable_vector(8, s8);
+ const LLT nxv16s8 = LLT::scalable_vector(16, s8);
+ const LLT nxv32s8 = LLT::scalable_vector(32, s8);
+ const LLT nxv64s8 = LLT::scalable_vector(64, s8);
+
+ const LLT nxv1s16 = LLT::scalable_vector(1, s16);
+ const LLT nxv2s16 = LLT::scalable_vector(2, s16);
+ const LLT nxv4s16 = LLT::scalable_vector(4, s16);
+ const LLT nxv8s16 = LLT::scalable_vector(8, s16);
+ const LLT nxv16s16 = LLT::scalable_vector(16, s16);
+ const LLT nxv32s16 = LLT::scalable_vector(32, s16);
+
+ const LLT nxv1s32 = LLT::scalable_vector(1, s32);
+ const LLT nxv2s32 = LLT::scalable_vector(2, s32);
+ const LLT nxv4s32 = LLT::scalable_vector(4, s32);
+ const LLT nxv8s32 = LLT::scalable_vector(8, s32);
+ const LLT nxv16s32 = LLT::scalable_vector(16, s32);
+
+ const LLT nxv1s64 = LLT::scalable_vector(1, s64);
+ const LLT nxv2s64 = LLT::scalable_vector(2, s64);
+ const LLT nxv4s64 = LLT::scalable_vector(4, s64);
+ const LLT nxv8s64 = LLT::scalable_vector(8, s64);
+
using namespace TargetOpcode;
+ auto AllVecTys = {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
+ nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
+ nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
+ nxv1s64, nxv2s64, nxv4s64, nxv8s64};
+
getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
.legalFor({s32, sXLen})
+ .legalIf(all(
+ typeInSet(0, AllVecTys),
+ LegalityPredicate([=, &ST](const LegalityQuery &Query) {
+ return ST.hasVInstructions() &&
+ (Query.Types[0].getScalarSizeInBits() != 64 ||
+ ST.hasVInstructionsI64()) &&
+ (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
+ ST.getELen() == 64);
+ })))
.widenScalarToNextPow2(0)
.clampScalar(0, s32, sXLen);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6a2d21b555cc2b..90d648dab2ae81 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20019,8 +20019,13 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
}
bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
- // At the moment, the only scalable instruction GISel knows how to lower is
- // ret with scalable argument.
+
+ // GISel support is in progress or complete for G_ADD, G_SUB, G_AND, G_OR, and
+ // G_XOR.
+ unsigned Op = Inst.getOpcode();
+ if (Op == Instruction::Add || Op == Instruction::Sub ||
+ Op == Instruction::And || Op == Instruction::Or || Op == Instruction::Xor)
+ return false;
if (Inst.getType()->isScalableTy())
return true;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll
new file mode 100644
index 00000000000000..f5e81718226c69
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV32I
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV64I
+
+define void @add_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+ ; CHECK-LABEL: name: add_nxv2i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $v8, $v9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: PseudoRET
+ %c = add <vscale x 2 x i32> %a, %b
+ ret void
+}
+
+define void @sub_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+ ; CHECK-LABEL: name: sub_nxv2i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $v8, $v9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: PseudoRET
+ %c = sub <vscale x 2 x i32> %a, %b
+ ret void
+}
+
+define void @and_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+ ; CHECK-LABEL: name: and_nxv2i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $v8, $v9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: PseudoRET
+ %c = and <vscale x 2 x i32> %a, %b
+ ret void
+}
+
+define void @or_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+ ; CHECK-LABEL: name: or_nxv2i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $v8, $v9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: PseudoRET
+ %c = or <vscale x 2 x i32> %a, %b
+ ret void
+}
+
+define void @xor_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+ ; CHECK-LABEL: name: xor_nxv2i32
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $v8, $v9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: PseudoRET
+ %c = xor <vscale x 2 x i32> %a, %b
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
new file mode 100644
index 00000000000000..85ad899a5a91c4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
@@ -0,0 +1,274 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv2i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv64i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv2i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv2i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
new file mode 100644
index 00000000000000..aa0ab96f8ded18
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
@@ -0,0 +1,399 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv64i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = COPY $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v10m2
+ %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv4i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(<vscale x 4 x s64>) = COPY $v12m4
+ %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv8i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(<vscale x 8 x s64>) = COPY $v16m8
+ %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
new file mode 100644
index 00000000000000..8295e55c079c17
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
@@ -0,0 +1,399 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv64i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = COPY $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v10m2
+ %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv4i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(<vscale x 4 x s64>) = COPY $v12m4
+ %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv8i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(<vscale x 8 x s64>) = COPY $v16m8
+ %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
new file mode 100644
index 00000000000000..22c2258f2b920c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
@@ -0,0 +1,399 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv64i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = COPY $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v10m2
+ %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv4i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(<vscale x 4 x s64>) = COPY $v12m4
+ %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv8i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(<vscale x 8 x s64>) = COPY $v16m8
+ %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
new file mode 100644
index 00000000000000..eb961b8aa30349
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
@@ -0,0 +1,399 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv64i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = COPY $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v10m2
+ %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv4i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(<vscale x 4 x s64>) = COPY $v12m4
+ %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv8i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(<vscale x 8 x s64>) = COPY $v16m8
+ %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
new file mode 100644
index 00000000000000..4de02b1a04da48
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
@@ -0,0 +1,399 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv16i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %1:_(<vscale x 16 x s8>) = COPY $v10m2
+ %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv32i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %1:_(<vscale x 32 x s8>) = COPY $v12m4
+ %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv64i8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv64i8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = COPY $v8m8
+ %1:_(<vscale x 64 x s8>) = COPY $v16m8
+ %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+ $v8m8 = COPY %2(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv8i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(<vscale x 8 x s16>) = COPY $v10m2
+ %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv16i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %1:_(<vscale x 16 x s16>) = COPY $v12m4
+ %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv32i16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv32i16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %1:_(<vscale x 32 x s16>) = COPY $v16m8
+ %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+ $v8m8 = COPY %2(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv4i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(<vscale x 4 x s32>) = COPY $v10m2
+ %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv8i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(<vscale x 8 x s32>) = COPY $v12m4
+ %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv16i32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv16i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %1:_(<vscale x 16 x s32>) = COPY $v16m8
+ %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: test_nxv1i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv1i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: test_nxv2i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv2i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = COPY $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v10m2
+ %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: test_nxv4i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv4i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(<vscale x 4 x s64>) = COPY $v12m4
+ %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: test_nxv8i64
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_nxv8i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(<vscale x 8 x s64>) = COPY $v16m8
+ %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...