[llvm] [RISCV][GlobalISel] Legalize G_ADD, G_SUB, G_AND, G_OR, G_XOR on RISC-V Vector Extension (PR #71400)
Jiahan Xie via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 8 10:18:17 PST 2023
https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/71400
>From 1b9bc2e3525fcf55dcdeefb3ca6b4ac730543ee9 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 8 Nov 2023 19:22:21 -0500
Subject: [PATCH 1/6] [RISCV][GISEL] Legalize G_ADD, G_SUB, G_AND, G_OR, G_XOR;
G_ADD legalized
---
llvm/lib/CodeGen/MachineVerifier.cpp | 8 +
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 31 +-
.../legalizer/legalize-add-rv32.mir | 353 +++++++++++++++--
.../legalizer/legalize-add-rv64.mir | 366 ++++++++++++++++--
4 files changed, 707 insertions(+), 51 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index aaf9bd740d137..b1334456026c8 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1946,6 +1946,9 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
SrcSize = TRI->getRegSizeInBits(*SrcRC);
}
+ if (SrcSize.isZero())
+ SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
+
if (DstReg.isPhysical() && SrcTy.isValid()) {
const TargetRegisterClass *DstRC =
TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
@@ -1966,6 +1969,11 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
!DstSize.isScalable())
break;
+ // If the Src is scalable and the Dst is fixed, the copy is valid only if
+ // the Dst size is known to fit within the minimum size of the Src.
+ if (SrcSize.isScalable() && !DstSize.isScalable() &&
+ TypeSize::isKnownLE(DstSize, SrcSize))
+ break;
if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
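
For context on the check above: a scalable size <vscale x N> is at least N
bits for any vscale >= 1, so a fixed Dst of K bits is known to fit in a
scalable Src whenever K <= N. A minimal sketch of the comparison, assuming
only the TypeSize helpers that appear in the hunk (the wrapper function
itself is hypothetical):

  #include "llvm/Support/TypeSize.h"
  #include <cassert>
  using namespace llvm;

  // Returns true when a fixed-size Dst is known to fit in a scalable Src for
  // every possible vscale. isKnownLE compares the known-minimum values, so
  // for a fixed LHS and a scalable RHS it holds exactly when K <= N.
  static bool fixedDstFitsInScalableSrc(TypeSize DstSize, TypeSize SrcSize) {
    assert(!DstSize.isScalable() && SrcSize.isScalable());
    return TypeSize::isKnownLE(DstSize, SrcSize);
  }

  // e.g. a 64-bit Dst against <vscale x 64> bits is known to fit, while a
  // 128-bit Dst against <vscale x 64> bits is not, since vscale could be 1:
  //   fixedDstFitsInScalableSrc(TypeSize::getFixed(64),
  //                             TypeSize::getScalable(64));  // true

Because vscale is unknown at verification time, only this known-minimum
comparison is safe; an exact size-equality check would reject legitimate
copies.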
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 570b9802a2f51..6c5f92c5ff818 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -46,10 +46,39 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
+ const LLT nxv1s8 = LLT::scalable_vector(1, s8);
+ const LLT nxv2s8 = LLT::scalable_vector(2, s8);
+ const LLT nxv4s8 = LLT::scalable_vector(4, s8);
+ const LLT nxv8s8 = LLT::scalable_vector(8, s8);
+ const LLT nxv16s8 = LLT::scalable_vector(16, s8);
+ const LLT nxv32s8 = LLT::scalable_vector(32, s8);
+ const LLT nxv64s8 = LLT::scalable_vector(64, s8);
+
+ const LLT nxv1s16 = LLT::scalable_vector(1, s16);
+ const LLT nxv2s16 = LLT::scalable_vector(2, s16);
+ const LLT nxv4s16 = LLT::scalable_vector(4, s16);
+ const LLT nxv8s16 = LLT::scalable_vector(8, s16);
+ const LLT nxv16s16 = LLT::scalable_vector(16, s16);
+ const LLT nxv32s16 = LLT::scalable_vector(32, s16);
+
+ const LLT nxv1s32 = LLT::scalable_vector(1, s32);
+ const LLT nxv2s32 = LLT::scalable_vector(2, s32);
+ const LLT nxv4s32 = LLT::scalable_vector(4, s32);
+ const LLT nxv8s32 = LLT::scalable_vector(8, s32);
+ const LLT nxv16s32 = LLT::scalable_vector(16, s32);
+
+ const LLT nxv1s64 = LLT::scalable_vector(1, s64);
+ const LLT nxv2s64 = LLT::scalable_vector(2, s64);
+ const LLT nxv4s64 = LLT::scalable_vector(4, s64);
+ const LLT nxv8s64 = LLT::scalable_vector(8, s64);
+
using namespace TargetOpcode;
getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
- .legalFor({s32, sXLen})
+ .legalFor({s32, sXLen, nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
+            nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16,
+            nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32, nxv1s64, nxv2s64,
+            nxv4s64, nxv8s64})
.widenScalarToNextPow2(0)
.clampScalar(0, s32, sXLen);
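
The list above enumerates every scalable element-count/element-type
combination RVV can hold, from nxv1s8 (a fractional LMUL=1/8 slice of one
vector register, given vscale = VLEN/64) up to nxv8s64 (a full LMUL=8
register group). A hedged sketch of how a client would observe the new rule;
LegalizerInfo, getAction, and LLT::scalable_vector are the real GlobalISel
API, while the wrapper function is illustrative:

  #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
  #include "llvm/CodeGen/TargetOpcodes.h"
  using namespace llvm;

  // Query whether G_ADD on <vscale x 4 x s32> is already Legal under the
  // rule above. G_ADD has a single type index, so the query carries one LLT.
  static bool isScalableAddLegal(const LegalizerInfo &LI) {
    const LLT NxV4S32 = LLT::scalable_vector(4, LLT::scalar(32));
    return LI.getAction({TargetOpcode::G_ADD, {NxV4S32}}).Action ==
           LegalizeActions::Legal;
  }

Note that the scalar fallbacks (widenScalarToNextPow2, clampScalar) only act
on scalar types; as of this patch, vector types outside the listed set have
no legalization rule here.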
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index d169eb316dfcb..2c63b92c91b4f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -142,29 +142,30 @@ body: |
---
name: add_i96
body: |
+ ; CHECK-LABEL: name: add_i96
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: %lo1:_(s32) = COPY $x10
+ ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
+ ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
+ ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
+ ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
+ ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
+ ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
+ ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
bb.0.entry:
- ; CHECK-LABEL: name: add_i96
- ; CHECK: %lo1:_(s32) = COPY $x10
- ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
- ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
- ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
- ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
- ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
- ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
- ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
- ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
%hi1:_(s32) = COPY $x12
@@ -181,3 +182,311 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+ bb.0.entry:
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+ bb.0.entry:
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+ bb.0.entry:
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index f394e4d5064ed..b4eefb7354511 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -174,35 +174,36 @@ body: |
---
name: add_i192
body: |
+ ; CHECK-LABEL: name: add_i192
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: %lo1:_(s64) = COPY $x10
+ ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
+ ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
+ ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
+ ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
+ ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
+ ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+ ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
+ ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
bb.0.entry:
- ; CHECK-LABEL: name: add_i192
- ; CHECK: %lo1:_(s64) = COPY $x10
- ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
- ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
- ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
- ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
- ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
- ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
- ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
- ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
%hi1:_(s64) = COPY $x12
@@ -219,3 +220,312 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+ bb.0.entry:
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+ bb.0.entry:
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+ bb.0.entry:
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
>From ca92934bc9d2d74b7ddf9e13f9c65db79db97144 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Thu, 9 Nov 2023 11:18:42 -0500
Subject: [PATCH 2/6] update G_ADD test cases; add test cases for G_SUB, G_AND,
G_OR, and G_XOR
---
.../legalizer/legalize-add-rv32.mir | 116 ++++---
.../legalizer/legalize-add-rv64.mir | 127 ++++---
.../legalizer/legalize-and-rv32.mir | 328 ++++++++++++++++++
.../legalizer/legalize-and-rv64.mir | 327 +++++++++++++++++
.../GlobalISel/legalizer/legalize-or-rv32.mir | 327 +++++++++++++++++
.../GlobalISel/legalizer/legalize-or-rv64.mir | 327 +++++++++++++++++
.../legalizer/legalize-sub-rv32.mir | 327 +++++++++++++++++
.../legalizer/legalize-sub-rv64.mir | 328 ++++++++++++++++++
.../legalizer/legalize-xor-rv32.mir | 327 +++++++++++++++++
.../legalizer/legalize-xor-rv64.mir | 327 +++++++++++++++++
10 files changed, 2757 insertions(+), 104 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index 2c63b92c91b4f..14869dbb99e0f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -142,30 +142,29 @@ body: |
---
name: add_i96
body: |
- ; CHECK-LABEL: name: add_i96
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: %lo1:_(s32) = COPY $x10
- ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
- ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
- ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
- ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
- ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
- ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
- ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
- ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
bb.0.entry:
+ ; CHECK-LABEL: name: add_i96
+ ; CHECK: %lo1:_(s32) = COPY $x10
+ ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
+ ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
+ ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
+ ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
+ ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
+ ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
+ ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
%hi1:_(s32) = COPY $x12
@@ -183,9 +182,14 @@ body: |
...
---
-name: test_nxv1s8
-body: |
+name: test_nxv1s8
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
%0:_(<vscale x 1 x s8>) = COPY $v8
%1:_(<vscale x 1 x s8>) = COPY $v9
%2:_(<vscale x 1 x s8>) = G_ADD %0, %1
@@ -270,13 +274,12 @@ body: |
---
name: test_nxv64s8
body: |
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
%0:_(<vscale x 64 x s8>) = COPY $v8
%1:_(<vscale x 64 x s8>) = COPY $v9
%2:_(<vscale x 64 x s8>) = G_ADD %0, %1
@@ -284,9 +287,14 @@ body: |
...
---
-name: test_nxv1s16
-body: |
+name: test_nxv1s16
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
%0:_(<vscale x 1 x s16>) = COPY $v8
%1:_(<vscale x 1 x s16>) = COPY $v9
%2:_(<vscale x 1 x s16>) = G_ADD %0, %1
@@ -355,13 +363,12 @@ body: |
---
name: test_nxv32s16
body: |
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
%0:_(<vscale x 32 x s16>) = COPY $v8
%1:_(<vscale x 32 x s16>) = COPY $v9
%2:_(<vscale x 32 x s16>) = G_ADD %0, %1
@@ -369,9 +376,14 @@ body: |
...
---
-name: test_nxv1s32
-body: |
+name: test_nxv1s32
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
%0:_(<vscale x 1 x s32>) = COPY $v8
%1:_(<vscale x 1 x s32>) = COPY $v9
%2:_(<vscale x 1 x s32>) = G_ADD %0, %1
@@ -425,13 +437,12 @@ body: |
---
name: test_nxv16s32
body: |
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
%0:_(<vscale x 16 x s32>) = COPY $v8
%1:_(<vscale x 16 x s32>) = COPY $v9
%2:_(<vscale x 16 x s32>) = G_ADD %0, %1
@@ -439,9 +450,14 @@ body: |
...
---
-name: test_nxv1s64
-body: |
+name: test_nxv1s64
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
%0:_(<vscale x 1 x s64>) = COPY $v8
%1:_(<vscale x 1 x s64>) = COPY $v9
%2:_(<vscale x 1 x s64>) = G_ADD %0, %1
@@ -490,3 +506,5 @@ body: |
%1:_(<vscale x 8 x s64>) = COPY $v9
%2:_(<vscale x 8 x s64>) = G_ADD %0, %1
PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index b4eefb7354511..9df48ad2028c9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -174,36 +174,35 @@ body: |
---
name: add_i192
body: |
- ; CHECK-LABEL: name: add_i192
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: %lo1:_(s64) = COPY $x10
- ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
- ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
- ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
- ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
- ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
- ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
- ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
- ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
bb.0.entry:
+ ; CHECK-LABEL: name: add_i192
+ ; CHECK: %lo1:_(s64) = COPY $x10
+ ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
+ ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
+ ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
+ ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
+ ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
+ ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+ ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
+ ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
%hi1:_(s64) = COPY $x12
@@ -221,9 +220,14 @@ body: |
...
---
-name: test_nxv1s8
-body: |
+name: test_nxv1s8
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
%0:_(<vscale x 1 x s8>) = COPY $v8
%1:_(<vscale x 1 x s8>) = COPY $v9
%2:_(<vscale x 1 x s8>) = G_ADD %0, %1
@@ -308,13 +312,12 @@ body: |
---
name: test_nxv64s8
body: |
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
%0:_(<vscale x 64 x s8>) = COPY $v8
%1:_(<vscale x 64 x s8>) = COPY $v9
%2:_(<vscale x 64 x s8>) = G_ADD %0, %1
@@ -322,9 +325,14 @@ body: |
...
---
-name: test_nxv1s16
-body: |
+name: test_nxv1s16
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
%0:_(<vscale x 1 x s16>) = COPY $v8
%1:_(<vscale x 1 x s16>) = COPY $v9
%2:_(<vscale x 1 x s16>) = G_ADD %0, %1
@@ -393,13 +401,12 @@ body: |
---
name: test_nxv32s16
body: |
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
%0:_(<vscale x 32 x s16>) = COPY $v8
%1:_(<vscale x 32 x s16>) = COPY $v9
%2:_(<vscale x 32 x s16>) = G_ADD %0, %1
@@ -407,9 +414,14 @@ body: |
...
---
-name: test_nxv1s32
-body: |
+name: test_nxv1s32
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
%0:_(<vscale x 1 x s32>) = COPY $v8
%1:_(<vscale x 1 x s32>) = COPY $v9
%2:_(<vscale x 1 x s32>) = G_ADD %0, %1
@@ -463,13 +475,12 @@ body: |
---
name: test_nxv16s32
body: |
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: bb.0.entry:
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
%0:_(<vscale x 16 x s32>) = COPY $v8
%1:_(<vscale x 16 x s32>) = COPY $v9
%2:_(<vscale x 16 x s32>) = G_ADD %0, %1
@@ -477,9 +488,14 @@ body: |
...
---
-name: test_nxv1s64
-body: |
+name: test_nxv1s64
+body: |
bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
%0:_(<vscale x 1 x s64>) = COPY $v8
%1:_(<vscale x 1 x s64>) = COPY $v9
%2:_(<vscale x 1 x s64>) = G_ADD %0, %1
@@ -529,3 +545,4 @@ body: |
%2:_(<vscale x 8 x s64>) = G_ADD %0, %1
PseudoRET implicit %2
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
index d5c13f403a0de..1b30c2752084f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
@@ -169,3 +169,331 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+
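Each of the G_AND bodies above pins down the same legalizer contract: for every scalable-vector element type and LMUL, the generic instruction must come out of the legalizer byte-for-byte identical, which is why each CHECK block simply mirrors its input MIR. A minimal sketch of the kind of rule that produces this behavior, assuming the standard LegalizeRuleSet API (the exact rule set this patch installs may differ):

    // Hedged sketch, not the literal rule in this PR.
    // #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
    // Inside a target's LegalizerInfo constructor: marking G_AND Legal for a
    // set of scalable-vector LLTs makes the legalizer return
    // LegalizeActions::Legal and leave the instruction untouched.
    getActionDefinitionsBuilder(TargetOpcode::G_AND)
        .legalFor({LLT::scalable_vector(1, LLT::scalar(8)),
                   LLT::scalable_vector(64, LLT::scalar(8)),
                   LLT::scalable_vector(8, LLT::scalar(64))});
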
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
index 89541575cf1c8..74152e83c5d11 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
@@ -201,3 +201,330 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
index 881f826e0ed04..a9c9e282421aa 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
@@ -169,3 +169,330 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
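What "legal" means operationally for each of these G_OR cases can be phrased as a LegalizerInfo query. Note that G_OR has a single type index covering all three operands, so the query carries one LLT. A hedged sketch, where the LI object and its construction are assumed:

    // #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
    // Assumed: LI is the target's fully constructed LegalizerInfo.
    const LLT NxV4S32 = LLT::scalable_vector(4, LLT::scalar(32));
    LegalityQuery Query(TargetOpcode::G_OR, {NxV4S32});
    assert(LI.getAction(Query).Action == LegalizeActions::Legal &&
           "scalable-vector G_OR should pass through the legalizer unchanged");
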
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
index 3c56929ef67bd..dc7645743905e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
@@ -201,3 +201,330 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
index 258d02646186c..2eb839b9527a2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
@@ -181,3 +181,330 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
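The hand-written bodies in these G_SUB tests correspond to what MachineIRBuilder would emit programmatically. A hedged sketch under stated assumptions: MBB is an existing MachineBasicBlock, and RISCV::V8/V9 stand in for the $v8/$v9 physical registers used above.

    // #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
    MachineIRBuilder MIB(MBB, MBB.end());
    const LLT NxV2S64 = LLT::scalable_vector(2, LLT::scalar(64));
    Register A = MIB.buildCopy(NxV2S64, Register(RISCV::V8)).getReg(0);
    Register B = MIB.buildCopy(NxV2S64, Register(RISCV::V9)).getReg(0);
    // Emitted as G_SUB; a Legal ruling means the legalizer re-emits it as-is.
    Register Diff = MIB.buildSub(NxV2S64, A, B).getReg(0);
    MIB.buildInstr(RISCV::PseudoRET).addUse(Diff, RegState::Implicit);
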
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
index c2504273c2af6..8ae992ff751cc 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
@@ -219,3 +219,331 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
index c0ba3e95da9cd..6ecfcbb9b86d4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
@@ -169,3 +169,330 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
index 469f8b25f7ec1..c1747b2f04dd5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
@@ -201,3 +201,330 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
>From 37119972da873b7cbca145f74529cdad374e41bb Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 13 Nov 2023 10:44:53 -0500
Subject: [PATCH 3/6] Legalize AllVecTys for G_ADD, G_AND, G_SUB, G_OR, G_XOR
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 22 ++++++++++++-------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 6c5f92c5ff818..3eca5e0d3178d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -74,16 +74,23 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
using namespace TargetOpcode;
+ auto AllVecTys = std::initializer_list<LLT>{
+ nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8, nxv1s16,
+ nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16, nxv1s32, nxv2s32, nxv4s32,
+ nxv8s32, nxv16s32, nxv1s64, nxv2s64, nxv4s64, nxv8s64};
+
getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
- .legalFor({s32, sXLen, nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8,
- nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16,
- nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
- nxv1s64, nxv2s64, nxv4s64, nxv8s64})
+ .legalFor({s32, sXLen})
+ .legalIf(all(typeInSet(0, AllVecTys),
+ LegalityPredicate([=, &ST](const LegalityQuery &Query) {
+ return ST.hasVInstructions() &&
+ (Query.Types[0].getScalarSizeInBits() != 64 || ST.hasVInstructionsI64()) &&
+ (Query.Types[0].getElementCount().getKnownMinValue() != 1 || ST.getELen() == 64);
+ })))
.widenScalarToNextPow2(0)
.clampScalar(0, s32, sXLen);
- getActionDefinitionsBuilder(
- {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
+ getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();
@@ -341,8 +348,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
// FIXME: We can do custom inline expansion like SelectionDAG.
// FIXME: Legal with Zfa.
- getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
- .libcallFor({s32, s64});
+ getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR}).libcallFor({s32, s64});
getActionDefinitionsBuilder(G_VASTART).customFor({p0});
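
For readers skimming the diff, the net effect of the new legalIf clause is
three subtarget checks layered on top of the AllVecTys type set. Below is a
minimal standalone sketch of that logic; SubtargetInfo here is a hypothetical
stand-in for the RISCVSubtarget queries used in the patch (hasVInstructions(),
hasVInstructionsI64(), getELen()), not the real class:

    // Sketch of the legality rule PATCH 3/6 installs for the vector types in
    // AllVecTys. A simplified stand-in, not the actual LLVM API surface.
    struct SubtargetInfo {
      bool HasVInstructions;    // some vector extension is present
      bool HasVInstructionsI64; // 64-bit vector elements are supported
      unsigned ELen;            // maximum element width: 32 or 64
    };

    bool isLegalVectorBinOp(unsigned ScalarSizeInBits, unsigned MinNumElts,
                            const SubtargetInfo &ST) {
      if (!ST.HasVInstructions)
        return false; // no vector extension at all
      if (ScalarSizeInBits == 64 && !ST.HasVInstructionsI64)
        return false; // s64 elements need 64-bit vector support
      if (MinNumElts == 1 && ST.ELen != 64)
        return false; // nxv1 element counts require ELEN == 64
      return true;
    }

Keeping the scalar rules in legalFor({s32, sXLen}) and gating every vector
type behind a single predicate means one rule encodes the ELEN and I64
constraints, instead of enumerating a separate legal-type list per subtarget
configuration.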
>From 7894a326471eef26d3a91334dc6fba0581d62602 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 29 Nov 2023 11:41:33 -0500
Subject: [PATCH 4/6] Move vector tests under rvv
---
.../legalizer/legalize-add-rv32.mir | 327 -----------------
.../legalizer/legalize-add-rv64.mir | 327 -----------------
.../legalizer/legalize-and-rv32.mir | 328 -----------------
.../legalizer/legalize-and-rv64.mir | 327 -----------------
.../GlobalISel/legalizer/legalize-or-rv32.mir | 327 -----------------
.../GlobalISel/legalizer/legalize-or-rv64.mir | 327 -----------------
.../legalizer/legalize-sub-rv32.mir | 327 -----------------
.../legalizer/legalize-sub-rv64.mir | 328 -----------------
.../legalizer/legalize-xor-rv32.mir | 327 -----------------
.../legalizer/legalize-xor-rv64.mir | 327 -----------------
.../GlobalISel/legalizer/rvv/legalize-add.mir | 329 +++++++++++++++++
.../GlobalISel/legalizer/rvv/legalize-and.mir | 331 ++++++++++++++++++
.../GlobalISel/legalizer/rvv/legalize-or.mir | 330 +++++++++++++++++
.../GlobalISel/legalizer/rvv/legalize-sub.mir | 330 +++++++++++++++++
.../GlobalISel/legalizer/rvv/legalize-xor.mir | 330 +++++++++++++++++
15 files changed, 1650 insertions(+), 3272 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
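
The hunks that follow only show the deletions from the old test files; the
newly created rvv/ files presumably carry their own RUN lines enabling the
vector extension, along the lines of the snippet below (the exact -mattr
string is an assumption, since the new files' contents are not shown here):

    # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
    # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s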
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index 14869dbb99e0f..d169eb316dfcb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -181,330 +181,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index 9df48ad2028c9..f394e4d5064ed 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -219,330 +219,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
- PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
index 1b30c2752084f..d5c13f403a0de 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
@@ -169,331 +169,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
-
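The deleted blocks above (and the parallel ones in the files that follow) all lean on the same legality rules for scalable integer vectors. As a rough illustration only, a predicate of the following shape can gate those types on the subtarget; `scalableIntVectorLegal` is a hypothetical name and the ELEN handling is an assumption, not the patch's verbatim code, though `hasVInstructions()` and `hasVInstructionsI64()` are real RISCVSubtarget hooks.

    // A minimal sketch, not the patch verbatim: accept a scalable
    // integer-vector LLT only when the V extension is implemented, with
    // 64-bit elements further gated on ELEN=64.
    #include "RISCVSubtarget.h"
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"

    using namespace llvm;

    static LegalityPredicate
    scalableIntVectorLegal(const RISCVSubtarget &ST, ArrayRef<LLT> TypeList) {
      SmallVector<LLT, 24> Types(TypeList.begin(), TypeList.end());
      return [Types = std::move(Types), &ST](const LegalityQuery &Query) {
        const LLT Ty = Query.Types[0];
        if (!ST.hasVInstructions() || !is_contained(Types, Ty))
          return false;
        // s64 elements additionally require ELEN=64 (V proper or Zve64*).
        return Ty.getScalarSizeInBits() != 64 || ST.hasVInstructionsI64();
      };
    }
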
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
index 74152e83c5d11..89541575cf1c8 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
@@ -201,330 +201,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_AND %0, %1
- PseudoRET implicit %2
-
-...
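Every test in these files encodes the same three-step shape: two COPYs from vector argument registers, one generic binary op, and a PseudoRET that keeps the result alive. Built programmatically rather than by hand, the G_AND variant would look roughly like the sketch below; the enclosing pass that owns the MachineIRBuilder is assumed.

    // Sketch of the G_AND shape the tests spell out by hand. RISCV::V8/V9
    // come from the target's generated register enum.
    #include "MCTargetDesc/RISCVMCTargetDesc.h"
    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

    using namespace llvm;

    static Register buildScalableAnd(MachineIRBuilder &B) {
      const LLT NxV1S8 = LLT::scalable_vector(1, LLT::scalar(8));
      // %0:_(<vscale x 1 x s8>) = COPY $v8
      Register Lhs = B.buildCopy(NxV1S8, Register(RISCV::V8)).getReg(0);
      // %1:_(<vscale x 1 x s8>) = COPY $v9
      Register Rhs = B.buildCopy(NxV1S8, Register(RISCV::V9)).getReg(0);
      // %2:_(<vscale x 1 x s8>) = G_AND %0, %1
      return B.buildAnd(NxV1S8, Lhs, Rhs).getReg(0);
    }
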
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
index a9c9e282421aa..881f826e0ed04 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
@@ -169,330 +169,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
index dc7645743905e..3c56929ef67bd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
@@ -201,330 +201,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_OR %0, %1
- PseudoRET implicit %2
-
-...
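The element-count coverage in these tests stops where each element width reaches the architectural LMUL=8 ceiling: nxv64s8, nxv32s16, nxv16s32, and nxv8s64 each occupy eight vector registers. A back-of-envelope check, using the real `RISCV::RVVBitsPerBlock` constant (64):

    // Back-of-envelope LMUL for a scalable LLT; fractional LMUL (<1) is
    // clamped to 1 here for simplicity.
    #include "llvm/CodeGen/LowLevelType.h"
    #include "llvm/TargetParser/RISCVTargetParser.h"
    #include <algorithm>

    static unsigned lmulFor(llvm::LLT Ty) {
      unsigned KnownMinBits =
          Ty.getElementCount().getKnownMinValue() * Ty.getScalarSizeInBits();
      return std::max(1u, KnownMinBits / llvm::RISCV::RVVBitsPerBlock);
    }
    // lmulFor(<vscale x 64 x s8>) == 8; lmulFor(<vscale x 1 x s64>) == 1.
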
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
index 2eb839b9527a2..258d02646186c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
@@ -181,330 +181,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
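Once the new rules are in place, every (opcode, type) pair these files exercise should query as Legal. A hypothetical spot-check against a constructed legalizer, where `LI` and its +v subtarget are assumptions:

    // Spot-check sketch: ask an already-built legalizer whether G_SUB on
    // <vscale x 1 x s8> reports Legal.
    #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
    #include <cassert>

    using namespace llvm;

    static void checkSubLegal(const LegalizerInfo &LI) {
      const LLT Types[] = {LLT::scalable_vector(1, LLT::scalar(8))};
      // G_SUB has a single type index shared by all three operands.
      LegalityQuery Q(TargetOpcode::G_SUB, Types);
      assert(LI.getAction(Q).Action == LegalizeActions::Legal);
      (void)LI; (void)Q; // keep NDEBUG builds warning-free
    }
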
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
index 8ae992ff751cc..c2504273c2af6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
@@ -219,331 +219,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
- PseudoRET implicit %2
-
-...
-
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
index 6ecfcbb9b86d4..c0ba3e95da9cd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
@@ -169,330 +169,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
index c1747b2f04dd5..469f8b25f7ec1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
@@ -201,330 +201,3 @@ body: |
PseudoRET implicit $x10, implicit $x11, implicit $x12
...
----
-name: test_nxv1s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
- %0:_(<vscale x 1 x s8>) = COPY $v8
- %1:_(<vscale x 1 x s8>) = COPY $v9
- %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv2s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
- %0:_(<vscale x 2 x s8>) = COPY $v8
- %1:_(<vscale x 2 x s8>) = COPY $v9
- %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
- %0:_(<vscale x 4 x s8>) = COPY $v8
- %1:_(<vscale x 4 x s8>) = COPY $v9
- %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
- %0:_(<vscale x 8 x s8>) = COPY $v8
- %1:_(<vscale x 8 x s8>) = COPY $v9
- %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
- %0:_(<vscale x 16 x s8>) = COPY $v8
- %1:_(<vscale x 16 x s8>) = COPY $v9
- %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
- %0:_(<vscale x 32 x s8>) = COPY $v8
- %1:_(<vscale x 32 x s8>) = COPY $v9
- %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv64s8
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv64s8
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
- %0:_(<vscale x 64 x s8>) = COPY $v8
- %1:_(<vscale x 64 x s8>) = COPY $v9
- %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
- %0:_(<vscale x 1 x s16>) = COPY $v8
- %1:_(<vscale x 1 x s16>) = COPY $v9
- %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
- %0:_(<vscale x 2 x s16>) = COPY $v8
- %1:_(<vscale x 2 x s16>) = COPY $v9
- %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
- %0:_(<vscale x 4 x s16>) = COPY $v8
- %1:_(<vscale x 4 x s16>) = COPY $v9
- %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
- %0:_(<vscale x 8 x s16>) = COPY $v8
- %1:_(<vscale x 8 x s16>) = COPY $v9
- %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
- %0:_(<vscale x 16 x s16>) = COPY $v8
- %1:_(<vscale x 16 x s16>) = COPY $v9
- %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv32s16
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv32s16
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
- %0:_(<vscale x 32 x s16>) = COPY $v8
- %1:_(<vscale x 32 x s16>) = COPY $v9
- %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
- %0:_(<vscale x 1 x s32>) = COPY $v8
- %1:_(<vscale x 1 x s32>) = COPY $v9
- %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
- %0:_(<vscale x 2 x s32>) = COPY $v8
- %1:_(<vscale x 2 x s32>) = COPY $v9
- %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
- %0:_(<vscale x 4 x s32>) = COPY $v8
- %1:_(<vscale x 4 x s32>) = COPY $v9
- %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
- %0:_(<vscale x 8 x s32>) = COPY $v8
- %1:_(<vscale x 8 x s32>) = COPY $v9
- %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv16s32
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv16s32
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
- %0:_(<vscale x 16 x s32>) = COPY $v8
- %1:_(<vscale x 16 x s32>) = COPY $v9
- %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv1s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv1s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
- %0:_(<vscale x 1 x s64>) = COPY $v8
- %1:_(<vscale x 1 x s64>) = COPY $v9
- %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-...
----
-name: test_nxv2s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv2s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
- %0:_(<vscale x 2 x s64>) = COPY $v8
- %1:_(<vscale x 2 x s64>) = COPY $v9
- %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv4s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv4s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
- %0:_(<vscale x 4 x s64>) = COPY $v8
- %1:_(<vscale x 4 x s64>) = COPY $v9
- %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
----
-name: test_nxv8s64
-body: |
- bb.0.entry:
- ; CHECK-LABEL: name: test_nxv8s64
- ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
- ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
- %0:_(<vscale x 8 x s64>) = COPY $v8
- %1:_(<vscale x 8 x s64>) = COPY $v9
- %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
- PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
new file mode 100644
index 0000000000000..745ab0d56632c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
@@ -0,0 +1,329 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+ PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
new file mode 100644
index 0000000000000..f089186236a8a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+ PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
new file mode 100644
index 0000000000000..98180b6715716
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+ PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
new file mode 100644
index 0000000000000..deee01fbb1512
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+ PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
new file mode 100644
index 0000000000000..1695f845b0f52
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv1s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+ %0:_(<vscale x 1 x s8>) = COPY $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v9
+ %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v9
+ %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+ %0:_(<vscale x 1 x s32>) = COPY $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v9
+ %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv1s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv1s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+ %0:_(<vscale x 1 x s64>) = COPY $v8
+ %1:_(<vscale x 1 x s64>) = COPY $v9
+ %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+ %0:_(<vscale x 2 x s64>) = COPY $v8
+ %1:_(<vscale x 2 x s64>) = COPY $v9
+ %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv4s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+ %0:_(<vscale x 4 x s64>) = COPY $v8
+ %1:_(<vscale x 4 x s64>) = COPY $v9
+ %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
+---
+name: test_nxv8s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+ %0:_(<vscale x 8 x s64>) = COPY $v8
+ %1:_(<vscale x 8 x s64>) = COPY $v9
+ %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+ PseudoRET implicit %2
+
+...
>From e20360aa53b7ef28aac6f9b02fadbd759112d33f Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 29 Nov 2023 13:39:44 -0500
Subject: [PATCH 5/6] Copy the latest MachineVerifier.cpp file
---
llvm/lib/CodeGen/MachineVerifier.cpp | 8 --------
1 file changed, 8 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index b1334456026c8..aaf9bd740d137 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1946,9 +1946,6 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
SrcSize = TRI->getRegSizeInBits(*SrcRC);
}
- if (SrcSize.isZero())
- SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
-
if (DstReg.isPhysical() && SrcTy.isValid()) {
const TargetRegisterClass *DstRC =
TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
@@ -1969,11 +1966,6 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
!DstSize.isScalable())
break;
- // If the Src is scalable and the Dst is fixed, then Dest can only hold
- // the Src is known to fit in Dest
- if (SrcSize.isScalable() && !DstSize.isScalable() &&
- TypeSize::isKnownLE(DstSize, SrcSize))
- break;
if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
>From cab7d80738ee377c3bc5a20847b74b94b2ae069a Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 29 Nov 2023 13:42:04 -0500
Subject: [PATCH 6/6] Add test cases with +zve32x for vector add; clang-format
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 20 +-
 .../legalizer/rvv/legalize-add-zve32x.mir | 213 ++++++++++++++++++
 2 files changed, 225 insertions(+), 8 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 3eca5e0d3178d..c7d61692929ff 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -81,16 +81,19 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
.legalFor({s32, sXLen})
- .legalIf(all(typeInSet(0, AllVecTys),
- LegalityPredicate([=, &ST](const LegalityQuery &Query) {
- return ST.hasVInstructions() &&
- (Query.Types[0].getScalarSizeInBits() != 64 || ST.hasVInstructionsI64()) &&
- (Query.Types[0].getElementCount().getKnownMinValue() != 1 || ST.getELen() == 64);
- })))
+ .legalIf(all(
+ typeInSet(0, AllVecTys),
+ LegalityPredicate([=, &ST](const LegalityQuery &Query) {
+ return ST.hasVInstructions() &&
+ (Query.Types[0].getScalarSizeInBits() != 64 || ST.hasVInstructionsI64()) &&
+ (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
+ ST.getELen() == 64);
+ })))
.widenScalarToNextPow2(0)
.clampScalar(0, s32, sXLen);
- getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
+ getActionDefinitionsBuilder(
+ {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();
@@ -348,7 +351,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
// FIXME: We can do custom inline expansion like SelectionDAG.
// FIXME: Legal with Zfa.
- getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR}).libcallFor({s32, s64});
+ getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
+ .libcallFor({s32, s64});
getActionDefinitionsBuilder(G_VASTART).customFor({p0});
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
new file mode 100644
index 0000000000000..be0dad3ca97aa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
@@ -0,0 +1,213 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_nxv2s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+ %0:_(<vscale x 2 x s8>) = COPY $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v9
+ %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv4s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v9
+ %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv8s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(<vscale x 8 x s8>) = COPY $v9
+ %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv16s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+ %0:_(<vscale x 16 x s8>) = COPY $v8
+ %1:_(<vscale x 16 x s8>) = COPY $v9
+ %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv32s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+ %0:_(<vscale x 32 x s8>) = COPY $v8
+ %1:_(<vscale x 32 x s8>) = COPY $v9
+ %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv64s8
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv64s8
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+ %0:_(<vscale x 64 x s8>) = COPY $v8
+ %1:_(<vscale x 64 x s8>) = COPY $v9
+ %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+ %0:_(<vscale x 2 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v9
+ %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv4s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(<vscale x 4 x s16>) = COPY $v9
+ %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv8s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+ %0:_(<vscale x 8 x s16>) = COPY $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v9
+ %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv16s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+ %0:_(<vscale x 16 x s16>) = COPY $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v9
+ %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv32s16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv32s16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+ %0:_(<vscale x 32 x s16>) = COPY $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v9
+ %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv2s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv2s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+ %0:_(<vscale x 2 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v9
+ %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv4s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv4s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 4 x s32>) = COPY $v9
+ %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv8s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv8s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+ %0:_(<vscale x 8 x s32>) = COPY $v8
+ %1:_(<vscale x 8 x s32>) = COPY $v9
+ %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
+---
+name: test_nxv16s32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_nxv16s32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+ %0:_(<vscale x 16 x s32>) = COPY $v8
+ %1:_(<vscale x 16 x s32>) = COPY $v9
+ %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+ PseudoRET implicit %2
+...
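For reviewers cross-checking the +zve32x expectations above against the legality predicate added to RISCVLegalizerInfo.cpp, here is a minimal standalone C++ sketch of that rule. The `SubtargetInfo` struct and `isLegalVectorBinOp` helper are hypothetical stand-ins for the real `RISCVSubtarget` queries, not LLVM API:

```cpp
// Standalone sketch of the vector legality rule from the patch above:
// a scalable-vector binary op is legal only when vector instructions
// are present, 64-bit elements additionally require I64 vector support,
// and single-element (nxv1) types additionally require ELEN == 64.
#include <cstdio>

struct SubtargetInfo {       // hypothetical stand-in for RISCVSubtarget
  bool HasVInstructions;     // e.g. +v or +zve32x
  bool HasVInstructionsI64;  // 64-bit vector elements supported
  unsigned ELen;             // maximum element width: 32 or 64
};

static bool isLegalVectorBinOp(const SubtargetInfo &ST, unsigned ScalarBits,
                               unsigned MinNumElts) {
  return ST.HasVInstructions &&
         (ScalarBits != 64 || ST.HasVInstructionsI64) &&
         (MinNumElts != 1 || ST.ELen == 64);
}

int main() {
  const SubtargetInfo Zve32x{true, false, 32}; // +zve32x: no I64, ELEN=32
  std::printf("nxv2s8:  %d\n", isLegalVectorBinOp(Zve32x, 8, 2));  // 1: legal
  std::printf("nxv1s8:  %d\n", isLegalVectorBinOp(Zve32x, 8, 1));  // 0: needs ELEN=64
  std::printf("nxv2s64: %d\n", isLegalVectorBinOp(Zve32x, 64, 2)); // 0: needs I64
  return 0;
}
```

Under these assumptions, this is why the +zve32x tests start at nxv2 element counts and stop at s32 element types, while the +v tests earlier in the series cover nxv1 through s64.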