[llvm] [RISCV][GlobalISel] Represent RISC-V vector types using LLT scalable vectors; and legalize vectorized operations for G_ADD, G_SUB, G_AND, G_OR, and G_XOR opcodes (PR #71400)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 12 07:05:00 PST 2023


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/71400
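
For context: in GlobalISel, a scalable vector LLT mirrors LLVM IR's
<vscale x N x iM> types. A minimal standalone sketch of the representation
this PR builds on (the header path and wrapper function are illustrative
assumptions, not part of the patch):

  #include "llvm/CodeGenTypes/LowLevelType.h"
  using namespace llvm;

  void sketch() {
    const LLT S8 = LLT::scalar(8);
    // <vscale x 4 x s8>: at least 4 x i8 elements; the actual element count
    // is vscale * 4, where vscale is fixed by the hardware at run time.
    const LLT NxV4S8 = LLT::scalable_vector(4, S8);
    // Sizes of scalable types carry a scalable bit, so only the "known"
    // comparisons (e.g. TypeSize::isKnownLE) are meaningful on them.
    TypeSize Size = NxV4S8.getSizeInBits(); // minimum 32 bits, scalable
    (void)Size;
  }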

>From a9809d1c666f53e4b3e000b0db0e85612c369cbf Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 8 Nov 2023 19:22:21 -0500
Subject: [PATCH 01/12] [RISCV][GISEL] Legalize G_ADD, G_SUB, G_AND, G_OR,
 G_XOR; G_ADD legalized

---
 llvm/lib/CodeGen/MachineVerifier.cpp          |   8 +
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  31 +-
 .../legalizer/legalize-add-rv32.mir           | 353 +++++++++++++++--
 .../legalizer/legalize-add-rv64.mir           | 366 ++++++++++++++++--
 4 files changed, 707 insertions(+), 51 deletions(-)
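
Context for the MachineVerifier hunk below: sizes that may involve scalable
vectors are TypeSize values (a known minimum plus a scalable flag), so plain
ordering comparisons are not total and the verifier has to use the
TypeSize::isKnown* predicates. A standalone sketch of those semantics:

  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  // A scalable 32 bits means vscale * 32 for some hardware vscale >= 1.
  TypeSize Fixed64 = TypeSize::getFixed(64);
  TypeSize Scal32 = TypeSize::getScalable(32);

  // isKnownLE(LHS, RHS) holds only if LHS <= RHS for every possible vscale.
  bool A = TypeSize::isKnownLE(Scal32, Fixed64); // false: vscale may exceed 2
  bool B = TypeSize::isKnownLE(TypeSize::getFixed(32), Scal32); // true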

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index a015d9bbd2d3f5..a4e412dff18c0e 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1969,6 +1969,9 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
         SrcSize = TRI->getRegSizeInBits(*SrcRC);
     }
 
+    if (SrcSize.isZero())
+      SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
+
     if (DstReg.isPhysical() && SrcTy.isValid()) {
       const TargetRegisterClass *DstRC =
           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
@@ -1989,6 +1992,11 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
         !DstSize.isScalable())
       break;
+    // If the Src is scalable and the Dst is fixed, the copy is only valid
+    // when the fixed-size Dst is known to fit within the scalable Src.
+    if (SrcSize.isScalable() && !DstSize.isScalable() &&
+        TypeSize::isKnownLE(DstSize, SrcSize))
+      break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 8f03a7ac41d37b..95b56ac4475dcf 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -47,10 +47,39 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
 
+  const LLT nxv1s8 = LLT::scalable_vector(1, s8);
+  const LLT nxv2s8 = LLT::scalable_vector(2, s8);
+  const LLT nxv4s8 = LLT::scalable_vector(4, s8);
+  const LLT nxv8s8 = LLT::scalable_vector(8, s8);
+  const LLT nxv16s8 = LLT::scalable_vector(16, s8);
+  const LLT nxv32s8 = LLT::scalable_vector(32, s8);
+  const LLT nxv64s8 = LLT::scalable_vector(64, s8);
+
+  const LLT nxv1s16 = LLT::scalable_vector(1, s16);
+  const LLT nxv2s16 = LLT::scalable_vector(2, s16);
+  const LLT nxv4s16 = LLT::scalable_vector(4, s16);
+  const LLT nxv8s16 = LLT::scalable_vector(8, s16);
+  const LLT nxv16s16 = LLT::scalable_vector(16, s16);
+  const LLT nxv32s16 = LLT::scalable_vector(32, s16);
+
+  const LLT nxv1s32 = LLT::scalable_vector(1, s32);
+  const LLT nxv2s32 = LLT::scalable_vector(2, s32);
+  const LLT nxv4s32 = LLT::scalable_vector(4, s32);
+  const LLT nxv8s32 = LLT::scalable_vector(8, s32);
+  const LLT nxv16s32 = LLT::scalable_vector(16, s32);
+
+  const LLT nxv1s64 = LLT::scalable_vector(1, s64);
+  const LLT nxv2s64 = LLT::scalable_vector(2, s64);
+  const LLT nxv4s64 = LLT::scalable_vector(4, s64);
+  const LLT nxv8s64 = LLT::scalable_vector(8, s64);
+
   using namespace TargetOpcode;
 
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
-      .legalFor({s32, sXLen})
+      .legalFor({s32, sXLen, nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8,
+                 nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16,
+                 nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
+                 nxv1s64, nxv2s64, nxv4s64, nxv8s64})
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, sXLen);
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index d169eb316dfcb7..2c63b92c91b4f3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -142,29 +142,30 @@ body:             |
 ---
 name:            add_i96
 body:             |
+  ; CHECK-LABEL: name: add_i96
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   %lo1:_(s32) = COPY $x10
+  ; CHECK-NEXT:   %mid1:_(s32) = COPY $x11
+  ; CHECK-NEXT:   %hi1:_(s32) = COPY $x12
+  ; CHECK-NEXT:   %lo2:_(s32) = COPY $x13
+  ; CHECK-NEXT:   %mid2:_(s32) = COPY $x14
+  ; CHECK-NEXT:   %hi2:_(s32) = COPY $x15
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
+  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
+  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
+  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
+  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
+  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s32)
+  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s32)
+  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
-    ; CHECK-LABEL: name: add_i96
-    ; CHECK: %lo1:_(s32) = COPY $x10
-    ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
-    ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
-    ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
-    ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
-    ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
-    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
-    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
-    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
-    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
-    ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
-    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
-    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s32) = COPY $x10
     %mid1:_(s32) = COPY $x11
     %hi1:_(s32) = COPY $x12
@@ -181,3 +182,311 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:		test_nxv1s8
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  ; CHECK-LABEL: name: test_nxv64s8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+  bb.0.entry:
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s16
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  ; CHECK-LABEL: name: test_nxv32s16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+  bb.0.entry:
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s32
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  ; CHECK-LABEL: name: test_nxv16s32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+  bb.0.entry:
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s64
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index f394e4d5064edc..b4eefb7354511a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -174,35 +174,36 @@ body:             |
 ---
 name:            add_i192
 body:             |
+  ; CHECK-LABEL: name: add_i192
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   %lo1:_(s64) = COPY $x10
+  ; CHECK-NEXT:   %mid1:_(s64) = COPY $x11
+  ; CHECK-NEXT:   %hi1:_(s64) = COPY $x12
+  ; CHECK-NEXT:   %lo2:_(s64) = COPY $x13
+  ; CHECK-NEXT:   %mid2:_(s64) = COPY $x14
+  ; CHECK-NEXT:   %hi2:_(s64) = COPY $x15
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
+  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
+  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
+  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
+  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
+  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
+  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s64)
+  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s64)
+  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
-    ; CHECK-LABEL: name: add_i192
-    ; CHECK: %lo1:_(s64) = COPY $x10
-    ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
-    ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
-    ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
-    ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
-    ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
-    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
-    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
-    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
-    ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
-    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
-    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s64) = COPY $x10
     %mid1:_(s64) = COPY $x11
     %hi1:_(s64) = COPY $x12
@@ -219,3 +220,312 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:		test_nxv1s8
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  ; CHECK-LABEL: name: test_nxv64s8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+  bb.0.entry:
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s16
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  ; CHECK-LABEL: name: test_nxv32s16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+  bb.0.entry:
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s32
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  ; CHECK-LABEL: name: test_nxv16s32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+  bb.0.entry:
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s64
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+

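Before the second patch, a note on the legalFor change in patch 01: it means
the legalizer answers Legal for G_ADD (and the other listed opcodes) on these
scalable-vector types when queried. A standalone sketch of such a query
(assumed, simplified use of the LegalizerInfo API):

  #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
  #include "llvm/CodeGen/TargetOpcodes.h"
  using namespace llvm;

  LegalizeActionStep queryAdd(const LegalizerInfo &LI) {
    const LLT NxV4S8 = LLT::scalable_vector(4, LLT::scalar(8));
    // G_ADD has one type index covering both sources and the result; with
    // the rule from patch 01 this returns {LegalizeActions::Legal, 0, LLT()}.
    return LI.getAction({TargetOpcode::G_ADD, {NxV4S8}});
  }
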
>From 591b9d6835e9f2ad5fb3171707dcf7cadddba61e Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Thu, 9 Nov 2023 11:18:42 -0500
Subject: [PATCH 02/12] update G_ADD test cases; add test cases for G_SUB,
 G_AND, G_OR, and G_XOR

---
 .../legalizer/legalize-add-rv32.mir           | 116 ++++---
 .../legalizer/legalize-add-rv64.mir           | 127 ++++---
 .../legalizer/legalize-and-rv32.mir           | 328 ++++++++++++++++++
 .../legalizer/legalize-and-rv64.mir           | 327 +++++++++++++++++
 .../GlobalISel/legalizer/legalize-or-rv32.mir | 327 +++++++++++++++++
 .../GlobalISel/legalizer/legalize-or-rv64.mir | 327 +++++++++++++++++
 .../legalizer/legalize-sub-rv32.mir           | 327 +++++++++++++++++
 .../legalizer/legalize-sub-rv64.mir           | 328 ++++++++++++++++++
 .../legalizer/legalize-xor-rv32.mir           | 327 +++++++++++++++++
 .../legalizer/legalize-xor-rv64.mir           | 327 +++++++++++++++++
 10 files changed, 2757 insertions(+), 104 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index 2c63b92c91b4f3..14869dbb99e0fa 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -142,30 +142,29 @@ body:             |
 ---
 name:            add_i96
 body:             |
-  ; CHECK-LABEL: name: add_i96
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   %lo1:_(s32) = COPY $x10
-  ; CHECK-NEXT:   %mid1:_(s32) = COPY $x11
-  ; CHECK-NEXT:   %hi1:_(s32) = COPY $x12
-  ; CHECK-NEXT:   %lo2:_(s32) = COPY $x13
-  ; CHECK-NEXT:   %mid2:_(s32) = COPY $x14
-  ; CHECK-NEXT:   %hi2:_(s32) = COPY $x15
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
-  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
-  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
-  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
-  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
-  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
-  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
-  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
-  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s32)
-  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s32)
-  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s32)
-  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
+    ; CHECK-LABEL: name: add_i96
+    ; CHECK: %lo1:_(s32) = COPY $x10
+    ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
+    ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
+    ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
+    ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
+    ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
+    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
+    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
+    ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s32) = COPY $x10
     %mid1:_(s32) = COPY $x11
     %hi1:_(s32) = COPY $x12
@@ -183,9 +182,14 @@ body:             |
 
 ...
 ---
-name:		test_nxv1s8
-body:		 |
+name:  test_nxv1s8
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
@@ -270,13 +274,12 @@ body:   |
 ---
 name:  test_nxv64s8
 body:   |
-  ; CHECK-LABEL: name: test_nxv64s8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
     %0:_(<vscale x 64 x s8>) = COPY $v8
     %1:_(<vscale x 64 x s8>) = COPY $v9
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
@@ -284,9 +287,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s16
-body:		 |
+name:  test_nxv1s16
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
@@ -355,13 +363,12 @@ body:   |
 ---
 name:  test_nxv32s16
 body:   |
-  ; CHECK-LABEL: name: test_nxv32s16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
     %0:_(<vscale x 32 x s16>) = COPY $v8
     %1:_(<vscale x 32 x s16>) = COPY $v9
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
@@ -369,9 +376,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s32
-body:		 |
+name:  test_nxv1s32
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
@@ -425,13 +437,12 @@ body:   |
 ---
 name:  test_nxv16s32
 body:   |
-  ; CHECK-LABEL: name: test_nxv16s32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
     %0:_(<vscale x 16 x s32>) = COPY $v8
     %1:_(<vscale x 16 x s32>) = COPY $v9
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
@@ -439,9 +450,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s64
-body:		 |
+name:  test_nxv1s64
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
@@ -490,3 +506,5 @@ body:   |
     %1:_(<vscale x 8 x s64>) = COPY $v9
     %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
     PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index b4eefb7354511a..9df48ad2028c93 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -174,36 +174,35 @@ body:             |
 ---
 name:            add_i192
 body:             |
-  ; CHECK-LABEL: name: add_i192
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   %lo1:_(s64) = COPY $x10
-  ; CHECK-NEXT:   %mid1:_(s64) = COPY $x11
-  ; CHECK-NEXT:   %hi1:_(s64) = COPY $x12
-  ; CHECK-NEXT:   %lo2:_(s64) = COPY $x13
-  ; CHECK-NEXT:   %mid2:_(s64) = COPY $x14
-  ; CHECK-NEXT:   %hi2:_(s64) = COPY $x15
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
-  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
-  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
-  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
-  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
-  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
-  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
-  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
-  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
-  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s64)
-  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s64)
-  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s64)
-  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
+    ; CHECK-LABEL: name: add_i192
+    ; CHECK: %lo1:_(s64) = COPY $x10
+    ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
+    ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
+    ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
+    ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
+    ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
+    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
+    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
+    ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s64) = COPY $x10
     %mid1:_(s64) = COPY $x11
     %hi1:_(s64) = COPY $x12
@@ -221,9 +220,14 @@ body:             |
 
 ...
 ---
-name:		test_nxv1s8
-body:		 |
+name:  test_nxv1s8
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
@@ -308,13 +312,12 @@ body:   |
 ---
 name:  test_nxv64s8
 body:   |
-  ; CHECK-LABEL: name: test_nxv64s8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
     %0:_(<vscale x 64 x s8>) = COPY $v8
     %1:_(<vscale x 64 x s8>) = COPY $v9
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
@@ -322,9 +325,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s16
-body:		 |
+name:  test_nxv1s16
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
@@ -393,13 +401,12 @@ body:   |
 ---
 name:  test_nxv32s16
 body:   |
-  ; CHECK-LABEL: name: test_nxv32s16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
     %0:_(<vscale x 32 x s16>) = COPY $v8
     %1:_(<vscale x 32 x s16>) = COPY $v9
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
@@ -407,9 +414,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s32
-body:		 |
+name:  test_nxv1s32
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
@@ -463,13 +475,12 @@ body:   |
 ---
 name:  test_nxv16s32
 body:   |
-  ; CHECK-LABEL: name: test_nxv16s32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
     %0:_(<vscale x 16 x s32>) = COPY $v8
     %1:_(<vscale x 16 x s32>) = COPY $v9
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
@@ -477,9 +488,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s64
-body:		 |
+name:  test_nxv1s64
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
@@ -529,3 +545,4 @@ body:   |
     %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
     PseudoRET implicit %2
 
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
index d5c13f403a0dee..1b30c2752084f2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
@@ -169,3 +169,331 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+
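
[Note on the test shape: every case added in these legalizer tests, here and in the files that follow, reduces to the same three-instruction pattern. Schematically, with N and EW standing in for the element count and element width (placeholders, not real MIR syntax):

    %0:_(<vscale x N x sEW>) = COPY $v8
    %1:_(<vscale x N x sEW>) = COPY $v9
    %2:_(<vscale x N x sEW>) = G_AND %0, %1    ; G_OR / G_SUB in the later files
    PseudoRET implicit %2

The autogenerated CHECK lines then assert that the legalizer re-emits the instruction unchanged, i.e. that the scalable-vector type is legal for that opcode.]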
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
index 89541575cf1c8f..74152e83c5d111 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
@@ -201,3 +201,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
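
[The hunks in this patch start past each file's header, so the RUN lines are not visible here. Assuming these files follow the usual convention for legalizer-only MIR tests, and that the vector cases require the V extension, the header would look roughly like this (the exact -mattr string is an assumption, since it is outside the hunk):

    # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
    # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s

with -mtriple=riscv32 in the *-rv32.mir variants.]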
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
index 881f826e0ed045..a9c9e282421aaa 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
@@ -169,3 +169,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
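
[If the legalizer rules for these opcodes change later, these CHECK blocks are meant to be regenerated rather than edited by hand: running llvm/utils/update_mir_test_checks.py on each file (with a built llc on PATH, or pointed at one via its --llc-binary option) rewrites the assertions in place, which also keeps the per-opcode variants byte-for-byte consistent with each other.]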
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
index 3c56929ef67bd2..dc7645743905ed 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
@@ -201,3 +201,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
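
[For the record, every file exercises the same type grid, with the largest element count at each width topping out at 512 bits per vscale unit:

    s8  : nxv1s8  .. nxv64s8   (8  .. 512 bits per vscale)
    s16 : nxv1s16 .. nxv32s16  (16 .. 512 bits per vscale)
    s32 : nxv1s32 .. nxv16s32  (32 .. 512 bits per vscale)
    s64 : nxv1s64 .. nxv8s64   (64 .. 512 bits per vscale)

Since RISC-V fixes the vscale granule at 64 bits (RVVBitsPerBlock), this spans register groups from fractional LMUL=1/8 up to LMUL=8, matching the scalable LLTs the legalizer declares.]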
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
index 258d02646186cd..2eb839b9527a2e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
@@ -181,3 +181,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
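
[One detail worth calling out: the trailing PseudoRET implicit %2 in every case is what keeps the result register alive. Without that implicit use, the G_SUB (or G_AND/G_OR) result would be trivially dead and could plausibly be dropped before the output is checked, leaving FileCheck nothing to match against.]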
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
index c2504273c2af67..8ae992ff751cc9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
@@ -219,3 +219,331 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
index c0ba3e95da9cde..6ecfcbb9b86d4c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
@@ -169,3 +169,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
index 469f8b25f7ec1e..c1747b2f04dd5f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
@@ -201,3 +201,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...

>From 48702fe9a8877436fd36da237a0a6ebcae8ca7ba Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 13 Nov 2023 10:44:53 -0500
Subject: [PATCH 03/12] legalize AllVecTys for G_ADD, G_AND, G_SUB, G_OR, G_XOR

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 22 ++++++++++++-------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 95b56ac4475dcf..eee0407a4234e8 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -75,16 +75,23 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   using namespace TargetOpcode;
 
+  auto AllVecTys = std::initializer_list<LLT>{
+      nxv1s8,  nxv2s8,   nxv4s8,  nxv8s8,   nxv16s8,  nxv32s8, nxv64s8, nxv1s16,
+      nxv2s16, nxv4s16,  nxv8s16, nxv16s16, nxv32s16, nxv1s32, nxv2s32, nxv4s32,
+      nxv8s32, nxv16s32, nxv1s64, nxv2s64,  nxv4s64,  nxv8s64};
+
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
-      .legalFor({s32, sXLen, nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8,
-                 nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16,
-                 nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
-                 nxv1s64, nxv2s64, nxv4s64, nxv8s64})
+      .legalFor({s32, sXLen})
+      .legalIf(all(typeInSet(0, AllVecTys),
+                   LegalityPredicate([=, &ST](const LegalityQuery &Query) {
+                     return ST.hasVInstructions() &&
+                            (Query.Types[0].getScalarSizeInBits() != 64 ||
+                             ST.hasVInstructionsI64()) &&
+                            (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
+                             ST.getELen() == 64);
+                   })))
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, sXLen);
 
-  getActionDefinitionsBuilder(
-      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
+  getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
 
   getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();
 
@@ -342,8 +349,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   // FIXME: We can do custom inline expansion like SelectionDAG.
   // FIXME: Legal with Zfa.
-  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
-      .libcallFor({s32, s64});
+  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR}).libcallFor({s32, s64});
 
   getActionDefinitionsBuilder(G_VASTART).customFor({p0});
 

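The new legalIf predicate in the hunk above packs three subtarget checks into
one lambda. As a minimal standalone sketch of the same logic (the helper name
here is made up for illustration and is not part of the patch; the subtarget
hooks are the ones the diff already uses):

static bool isLegalRVVBinOpType(LLT Ty, const RISCVSubtarget &ST) {
  // Without the vector extension, no scalable vector type is legal.
  if (!ST.hasVInstructions())
    return false;
  // s64 elements additionally require the I64 vector instructions.
  if (Ty.getScalarSizeInBits() == 64 && !ST.hasVInstructionsI64())
    return false;
  // The patch accepts nxv1 element counts only when ELEN is 64.
  if (Ty.getElementCount().getKnownMinValue() == 1 && ST.getELen() != 64)
    return false;
  return true;
}

Given a query type like nxv1s8 on a subtarget with ELEN == 32, the last check
fails and the type is not marked legal.
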
>From 0b40b1f4c5b5fa667963a54a4204d481c18a1e33 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 29 Nov 2023 11:41:33 -0500
Subject: [PATCH 04/12] move vector tests under rvv

---
 .../legalizer/legalize-add-rv32.mir           | 327 -----------------
 .../legalizer/legalize-add-rv64.mir           | 327 -----------------
 .../legalizer/legalize-and-rv32.mir           | 328 -----------------
 .../legalizer/legalize-and-rv64.mir           | 327 -----------------
 .../GlobalISel/legalizer/legalize-or-rv32.mir | 327 -----------------
 .../GlobalISel/legalizer/legalize-or-rv64.mir | 327 -----------------
 .../legalizer/legalize-sub-rv32.mir           | 327 -----------------
 .../legalizer/legalize-sub-rv64.mir           | 328 -----------------
 .../legalizer/legalize-xor-rv32.mir           | 327 -----------------
 .../legalizer/legalize-xor-rv64.mir           | 327 -----------------
 .../GlobalISel/legalizer/rvv/legalize-add.mir | 329 +++++++++++++++++
 .../GlobalISel/legalizer/rvv/legalize-and.mir | 331 ++++++++++++++++++
 .../GlobalISel/legalizer/rvv/legalize-or.mir  | 330 +++++++++++++++++
 .../GlobalISel/legalizer/rvv/legalize-sub.mir | 330 +++++++++++++++++
 .../GlobalISel/legalizer/rvv/legalize-xor.mir | 330 +++++++++++++++++
 15 files changed, 1650 insertions(+), 3272 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir

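Per the diffstat above, this patch consolidates the rv32 and rv64 copies of
the vector test cases into a single file per opcode under rvv/; the test
bodies move over essentially verbatim. The RUN lines of the new rvv/ files
are not visible in this excerpt; a typical legalizer invocation for such a
merged test would look roughly like the following (an assumption for
illustration, not copied from the patch):

# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
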
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index 14869dbb99e0fa..d169eb316dfcb7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -181,330 +181,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index 9df48ad2028c93..f394e4d5064edc 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -219,330 +219,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
index 1b30c2752084f2..d5c13f403a0dee 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv32.mir
@@ -169,331 +169,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
-
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
index 74152e83c5d111..89541575cf1c8f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-and-rv64.mir
@@ -201,330 +201,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
index a9c9e282421aaa..881f826e0ed045 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv32.mir
@@ -169,330 +169,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
index dc7645743905ed..3c56929ef67bd2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-or-rv64.mir
@@ -201,330 +201,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
index 2eb839b9527a2e..258d02646186cd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv32.mir
@@ -181,330 +181,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
index 8ae992ff751cc9..c2504273c2af67 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
@@ -219,331 +219,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
-
-...
-
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
index 6ecfcbb9b86d4c..c0ba3e95da9cde 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv32.mir
@@ -169,330 +169,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
index c1747b2f04dd5f..469f8b25f7ec1e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-xor-rv64.mir
@@ -201,330 +201,3 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
----
-name:  test_nxv1s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
-    %0:_(<vscale x 1 x s8>) = COPY $v8
-    %1:_(<vscale x 1 x s8>) = COPY $v9
-    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv2s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
-    %0:_(<vscale x 2 x s8>) = COPY $v8
-    %1:_(<vscale x 2 x s8>) = COPY $v9
-    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
-    %0:_(<vscale x 4 x s8>) = COPY $v8
-    %1:_(<vscale x 4 x s8>) = COPY $v9
-    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
-    %0:_(<vscale x 8 x s8>) = COPY $v8
-    %1:_(<vscale x 8 x s8>) = COPY $v9
-    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
-    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
-    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv64s8
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
-    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
-    %0:_(<vscale x 1 x s16>) = COPY $v8
-    %1:_(<vscale x 1 x s16>) = COPY $v9
-    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
-    %0:_(<vscale x 2 x s16>) = COPY $v8
-    %1:_(<vscale x 2 x s16>) = COPY $v9
-    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
-    %0:_(<vscale x 4 x s16>) = COPY $v8
-    %1:_(<vscale x 4 x s16>) = COPY $v9
-    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
-    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
-    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv32s16
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
-    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
-    %0:_(<vscale x 1 x s32>) = COPY $v8
-    %1:_(<vscale x 1 x s32>) = COPY $v9
-    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
-    %0:_(<vscale x 2 x s32>) = COPY $v8
-    %1:_(<vscale x 2 x s32>) = COPY $v9
-    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
-    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
-    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv16s32
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
-    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv1s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
-    %0:_(<vscale x 1 x s64>) = COPY $v8
-    %1:_(<vscale x 1 x s64>) = COPY $v9
-    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-...
----
-name:  test_nxv2s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
-    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv4s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
-    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
----
-name:  test_nxv8s64
-body:   |
-  bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
-    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
new file mode 100644
index 00000000000000..745ab0d56632cb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
@@ -0,0 +1,329 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
new file mode 100644
index 00000000000000..f089186236a8af
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
new file mode 100644
index 00000000000000..98180b6715716e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
new file mode 100644
index 00000000000000..deee01fbb1512d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
new file mode 100644
index 00000000000000..1695f845b0f525
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
@@ -0,0 +1,330 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...

>From ae7caf65ac27508f1e953b59b6c5ea14446fd802 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 29 Nov 2023 13:39:44 -0500
Subject: [PATCH 05/12] copying the latest MachineVerifier cpp file

---
 llvm/lib/CodeGen/MachineVerifier.cpp | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index a4e412dff18c0e..a015d9bbd2d3f5 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1969,9 +1969,6 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
         SrcSize = TRI->getRegSizeInBits(*SrcRC);
     }
 
-    if (SrcSize.isZero())
-      SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
-
     if (DstReg.isPhysical() && SrcTy.isValid()) {
       const TargetRegisterClass *DstRC =
           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
@@ -1992,11 +1989,6 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
         !DstSize.isScalable())
       break;
-    // If the Src is scalable and the Dst is fixed, then Dest can only hold
-    // the Src is known to fit in Dest
-    if (SrcSize.isScalable() && !DstSize.isScalable() &&
-        TypeSize::isKnownLE(DstSize, SrcSize))
-      break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
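
For context, the check reverted here compared a scalable source size against a
fixed destination size using TypeSize's "known" ordering. A minimal standalone
sketch of that relation (illustrative only; the helper name is made up and is
not part of the patch):

    #include "llvm/Support/TypeSize.h"
    using namespace llvm;

    // Mirrors the reverted verifier condition: the size-mismatch error was
    // skipped when the fixed destination size is known, for every possible
    // vscale, to be no larger than the scalable source size.
    static bool scalableSrcCoversFixedDst(TypeSize SrcSize, TypeSize DstSize) {
      return SrcSize.isScalable() && !DstSize.isScalable() &&
             TypeSize::isKnownLE(DstSize, SrcSize);
    }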

>From 3633e973c4a575216bb5f8a2da8a5da679309759 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 29 Nov 2023 13:42:04 -0500
Subject: [PATCH 06/12] test cases with +zve32x for vector add; clang-format

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  20 +-
 .../legalizer/rvv/legalize-add-zve32x.mir     | 215 ++++++++++++++++++
 2 files changed, 227 insertions(+), 8 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index eee0407a4234e8..0524c4061d9cde 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -82,16 +82,19 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
       .legalFor({s32, sXLen})
-      .legalIf(all(typeInSet(0, AllVecTys),
-                   LegalityPredicate([=, &ST](const LegalityQuery &Query) {
-                     return ST.hasVInstructions() && 
-										 (Query.Types[0].getScalarSizeInBits() != 64 || ST.hasVInstructionsI64()) &&
-										 (Query.Types[0].getElementCount().getKnownMinValue() != 1 || ST.getELen() == 64);
-                   })))
+      .legalIf(all(
+          typeInSet(0, AllVecTys),
+          LegalityPredicate([=, &ST](const LegalityQuery &Query) {
+            return ST.hasVInstructions() &&
+                   (Query.Types[0].getScalarSizeInBits() != 64 || ST.hasVInstructionsI64()) &&
+                   (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
+                    ST.getELen() == 64);
+          })))
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, sXLen);
 
-  getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
+  getActionDefinitionsBuilder(
+      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
 
   getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();
 
@@ -349,7 +352,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   // FIXME: We can do custom inline expansion like SelectionDAG.
   // FIXME: Legal with Zfa.
-  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR}).libcallFor({s32, s64});
+  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
+      .libcallFor({s32, s64});
 
   getActionDefinitionsBuilder(G_VASTART).customFor({p0});
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
new file mode 100644
index 00000000000000..be0dad3ca97aa2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
@@ -0,0 +1,213 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...

>From 6d8accd7e58ca5c1d6787fec3923894ee9b7631c Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 13:37:41 -0500
Subject: [PATCH 07/12] clang format

---
 llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 0524c4061d9cde..2f203e273b9f7e 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -85,8 +85,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
       .legalIf(all(
           typeInSet(0, AllVecTys),
           LegalityPredicate([=, &ST](const LegalityQuery &Query) {
-						return ST.hasVInstructions() &&
-                   (Query.Types[0].getScalarSizeInBits() != 64 || ST.hasVInstructionsI64()) &&
+            return ST.hasVInstructions() &&
+                   (Query.Types[0].getScalarSizeInBits() != 64 ||
+                    ST.hasVInstructionsI64()) &&
                    (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
                     ST.getELen() == 64);
           })))
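Restated as a plain helper, the predicate above applies three gates. The
following is an equivalent sketch for readability, not the patch itself; it
uses the same RISCVSubtarget queries that appear in the diff, and the nxv1
comment is my reading of the ELen check:

// Equivalent reading of the legality predicate for the vector ALU ops.
// Ty is assumed to be one of the scalable LLTs in AllVecTys.
static bool isLegalScalableALUType(const llvm::LLT &Ty,
                                   const llvm::RISCVSubtarget &ST) {
  if (!ST.hasVInstructions())
    return false; // no V/Zve* vector extension at all
  if (Ty.getScalarSizeInBits() == 64 && !ST.hasVInstructionsI64())
    return false; // s64 elements need Zve64*/V
  if (Ty.getElementCount().getKnownMinValue() == 1 && ST.getELen() != 64)
    return false; // the smallest nxv1 types are only available with ELEN=64
  return true;
}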

>From c4df8fe0e39a5cd95ffb181b04cdeafe5153fb0a Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 13:58:33 -0500
Subject: [PATCH 08/12] remove std initializer and just use just braces

---
 llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 2f203e273b9f7e..177297ed629c17 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -75,10 +75,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   using namespace TargetOpcode;
 
-  auto AllVecTys = std::initializer_list<LLT>{
-      nxv1s8,  nxv2s8,   nxv4s8,  nxv8s8,   nxv16s8,  nxv32s8, nxv64s8, nxv1s16,
-      nxv2s16, nxv4s16,  nxv8s16, nxv16s16, nxv32s16, nxv1s32, nxv2s32, nxv4s32,
-      nxv8s32, nxv16s32, nxv1s64, nxv2s64,  nxv4s64,  nxv8s64};
+  auto AllVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
+                    nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
+                    nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
+                    nxv1s64,  nxv2s64, nxv4s64, nxv8s64};
 
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
       .legalFor({s32, sXLen})
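For what it's worth, the two spellings in this patch are equivalent: in a
copy-initialization, auto plus a braced list already deduces
std::initializer_list, so the explicit type was redundant. A standalone
illustration (int stands in for LLT to keep it self-contained; hypothetical
example, not part of the patch):

#include <initializer_list>
#include <type_traits>

int main() {
  auto A = std::initializer_list<int>{1, 2, 3}; // old spelling
  auto B = {1, 2, 3};                           // new spelling
  // Both declarations deduce std::initializer_list<int>:
  static_assert(std::is_same_v<decltype(A), decltype(B)>);
  return 0;
}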

>From 48cf75e5699529818821a5bc1226331a4a642483 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 13:59:03 -0500
Subject: [PATCH 09/12] remove extra set of "..." and "---" separators

---
 .../RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir      | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
index be0dad3ca97aa2..400d77ce4847f9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
@@ -16,8 +16,6 @@ body:   |
     PseudoRET implicit %2
 ...
 ---
-...
----
 name:  test_nxv4s8
 body:   |
   bb.0.entry:

>From 1f51c477b5015630c2db17f0e7d29aa58ac21e0c Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Fri, 8 Dec 2023 14:21:59 -0500
Subject: [PATCH 10/12] add -mattr=+v to G_AND test cases

---
 .../CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
index f089186236a8af..3671321e925d4f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - | FileCheck %s
-# RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 ---
 name:  test_nxv1s8
 body:   |

>From a44c8337807d1b733e567d5ce70e16599d5f7e4e Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 12 Dec 2023 09:11:51 -0500
Subject: [PATCH 11/12] scalable vectorized G_ADD, G_SUB, G_AND, G_OR, G_XOR
 don't fall back to DAGISel

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 10 ++--
 .../RISCV/GlobalISel/irtranslator/vec-alu.ll  | 53 +++++++++++++++++++
 2 files changed, 59 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f2ec422b54a926..9edcd8fda9604c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19854,11 +19854,13 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
 }
 
 bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
-  // At the moment, the only scalable instruction GISel knows how to lower is
-  // ret with scalable argument.
 
-  if (Inst.getType()->isScalableTy())
-    return true;
+  // GISel can now legalize scalable-vector G_ADD, G_SUB, G_AND, G_OR, and
+  // G_XOR, so these opcodes no longer need to fall back to SelectionDAG.
+  unsigned Op = Inst.getOpcode();
+  if (Op == Instruction::Add || Op == Instruction::Sub ||
+      Op == Instruction::And || Op == Instruction::Or || Op == Instruction::Xor)
+    return false;
 
   for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
     if (Inst.getOperand(i)->getType()->isScalableTy() &&
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll
new file mode 100644
index 00000000000000..27ae529f85c405
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-alu.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV32I
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV64I
+
+define void @add_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+  ; CHECK-LABEL: name: add_nxv2i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $v8, $v9
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   PseudoRET
+  %c = add <vscale x 2 x i32> %a, %b
+  ret void
+}
+
+define void @sub_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+  ; CHECK-LABEL: name: sub_nxv2i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $v8, $v9
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   PseudoRET
+  %c = sub <vscale x 2 x i32> %a, %b
+  ret void
+}
+
+define void @and_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+  ; CHECK-LABEL: name: and_nxv2i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $v8, $v9
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   PseudoRET
+  %c = and <vscale x 2 x i32> %a, %b
+  ret void
+}
+
+define void @or_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+  ; CHECK-LABEL: name: or_nxv2i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $v8, $v9
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   PseudoRET
+  %c = or <vscale x 2 x i32> %a, %b
+  ret void
+}
+
+define void @xor_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+  ; CHECK-LABEL: name: xor_nxv2i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $v8, $v9
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   PseudoRET
+  %c = xor <vscale x 2 x i32> %a, %b
+  ret void
+}
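To spell out the fallback decision this patch leaves behind (the operand
loop is cut off at the bottom of the RISCVISelLowering.cpp hunk above): the
five binary ALU opcodes never fall back, while other instructions touching
scalable types still return to SelectionDAG. A simplified sketch under that
reading, not the verbatim function:

// Simplified reading of RISCVTargetLowering::fallBackToDAGISel after this
// patch; illustrative only, since the exact operand-loop condition is
// truncated in the hunk above.
static bool fallBackSketch(const llvm::Instruction &Inst) {
  using llvm::Instruction;
  switch (Inst.getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return false; // GISel now legalizes these, scalable or not
  default:
    break;
  }
  // Any other use of a scalable type still falls back to SelectionDAG.
  if (Inst.getType()->isScalableTy())
    return true;
  for (const llvm::Value *Op : Inst.operands())
    if (Op->getType()->isScalableTy())
      return true;
  return false;
}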

>From e42493e88c17346a0131e9b548c49ed720a11500 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 12 Dec 2023 10:03:45 -0500
Subject: [PATCH 12/12] update all legalize mir files with correct LMUL>1 cases

---
 .../legalizer/rvv/legalize-add-zve32x.mir     | 283 +++++++-----
 .../GlobalISel/legalizer/rvv/legalize-add.mir | 386 +++++++++-------
 .../GlobalISel/legalizer/rvv/legalize-and.mir | 386 +++++++++-------
 .../GlobalISel/legalizer/rvv/legalize-or.mir  | 385 +++++++++-------
 .../GlobalISel/legalizer/rvv/legalize-sub.mir | 385 +++++++++-------
 .../GlobalISel/legalizer/rvv/legalize-xor.mir | 431 ++++++++++--------
 6 files changed, 1331 insertions(+), 925 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
index 400d77ce4847f9..a6ded6b86cf3e2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir
@@ -2,212 +2,273 @@
 # RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name:  test_nxv2s8
-body:   |
+name:            vadd_vv_nxv2i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: vadd_vv_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv4s8
-body:   |
+name:            vadd_vv_nxv4i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: vadd_vv_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv8s8
-body:   |
+name:            vadd_vv_nxv8i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: vadd_vv_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv16s8
-body:   |
+name:            vadd_vv_nxv16i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name:  test_nxv32s8
-body:   |
+name:            vadd_vv_nxv32i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name:  test_nxv64s8
-body:   |
+name:            vadd_vv_nxv64i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
 ...
 ---
-name:  test_nxv2s16
-body:   |
+name:            vadd_vv_nxv2i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: vadd_vv_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv4s16
-body:   |
+name:            vadd_vv_nxv4i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: vadd_vv_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv8s16
-body:   |
+name:            vadd_vv_nxv8i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name:  test_nxv16s16
-body:   |
+name:            vadd_vv_nxv16i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name:  test_nxv32s16
-body:   |
+name:            vadd_vv_nxv32i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
 ...
 ---
-name:  test_nxv2s32
-body:   |
+name:            vadd_vv_nxv2i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: vadd_vv_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv4s32
-body:   |
+name:            vadd_vv_nxv4i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name:  test_nxv8s32
-body:   |
+name:            vadd_vv_nxv8i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name:  test_nxv16s32
-body:   |
+name:            vadd_vv_nxv16i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: vadd_vv_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
 ...
+
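The register updates in this patch follow the usual RVV grouping rule: once
a scalable LLT's known-minimum size exceeds one 64-bit vector register
block, the value occupies a register group, and the second source operand
moves to the next LMUL-aligned group. A rough helper, assuming the backend's
64-bit block size (RISCV::RVVBitsPerBlock == 64); illustrative only:

// Rough mapping from a scalable LLT to the LMUL these tests expect.
static unsigned lmulFor(const llvm::LLT &Ty) {
  unsigned MinBits = Ty.getSizeInBits().getKnownMinValue();
  return MinBits <= 64 ? 1 : MinBits / 64; // 1, 2, 4, or 8
}
// nxv16s8 -> 128 known-min bits -> LMUL 2 -> result $v8m2, operand $v10m2
// nxv32s8 -> 256 -> LMUL 4 -> $v8m4 / $v12m4
// nxv64s8 -> 512 -> LMUL 8 -> $v8m8 / $v16m8
// nxv8s8, nxv2s32, etc. stay at LMUL 1 and keep plain $v8 / $v9.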
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
index 745ab0d56632cb..aa0ab96f8ded18 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add.mir
@@ -2,328 +2,398 @@
 # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name:  test_nxv1s8
-body:   |
+name:            test_nxv1i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
+
+    ; CHECK-LABEL: name: test_nxv1i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s8
-body:   |
+name:            test_nxv2i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: test_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s8
-body:   |
+name:            test_nxv4i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: test_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s8
-body:   |
+name:            test_nxv8i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: test_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv16s8
-body:   |
+name:            test_nxv16i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv32s8
-body:   |
+name:            test_nxv32i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv64s8
-body:   |
+name:            test_nxv64i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s16
-body:   |
+name:            test_nxv1i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
+
+    ; CHECK-LABEL: name: test_nxv1i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s16
-body:   |
+name:            test_nxv2i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: test_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s16
-body:   |
+name:            test_nxv4i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: test_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s16
-body:   |
+name:            test_nxv8i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv16s16
-body:   |
+name:            test_nxv16i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv32s16
-body:   |
+name:            test_nxv32i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s32
-body:   |
+name:            test_nxv1i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
+
+    ; CHECK-LABEL: name: test_nxv1i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s32
-body:   |
+name:            test_nxv2i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: test_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s32
-body:   |
+name:            test_nxv4i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv8s32
-body:   |
+name:            test_nxv8i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv16s32
-body:   |
+name:            test_nxv16i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s64
-body:   |
+name:            test_nxv1i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
+
+    ; CHECK-LABEL: name: test_nxv1i64
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s64
-body:   |
+name:            test_nxv2i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv2i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    %1:_(<vscale x 2 x s64>) = COPY $v10m2
     %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv4s64
-body:   |
+name:            test_nxv4i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %1:_(<vscale x 4 x s64>) = COPY $v12m4
     %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv8s64
-body:   |
+name:            test_nxv8i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    %1:_(<vscale x 8 x s64>) = COPY $v16m8
     %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
 
 ...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
index 3671321e925d4f..8295e55c079c17 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-and.mir
@@ -2,330 +2,398 @@
 # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name:  test_nxv1s8
-body:   |
+name:            test_nxv1i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
+
+    ; CHECK-LABEL: name: test_nxv1i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv2s8
-body:   |
+name:            test_nxv2i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: test_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s8
-body:   |
+name:            test_nxv4i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: test_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s8
-body:   |
+name:            test_nxv8i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: test_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv16s8
-body:   |
+name:            test_nxv16i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv32s8
-body:   |
+name:            test_nxv32i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv64s8
-body:   |
+name:            test_nxv64i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s16
-body:   |
+name:            test_nxv1i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
+
+    ; CHECK-LABEL: name: test_nxv1i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s16
-body:   |
+name:            test_nxv2i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: test_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s16
-body:   |
+name:            test_nxv4i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: test_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s16
-body:   |
+name:            test_nxv8i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv16s16
-body:   |
+name:            test_nxv16i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv32s16
-body:   |
+name:            test_nxv32i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s32
-body:   |
+name:            test_nxv1i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
+
+    ; CHECK-LABEL: name: test_nxv1i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s32
-body:   |
+name:            test_nxv2i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: test_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s32
-body:   |
+name:            test_nxv4i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv8s32
-body:   |
+name:            test_nxv8i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv16s32
-body:   |
+name:            test_nxv16i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s64
-body:   |
+name:            test_nxv1i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
+
+    ; CHECK-LABEL: name: test_nxv1i64
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s64
-body:   |
+name:            test_nxv2i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv2i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    %1:_(<vscale x 2 x s64>) = COPY $v10m2
     %2:_(<vscale x 2 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv4s64
-body:   |
+name:            test_nxv4i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %1:_(<vscale x 4 x s64>) = COPY $v12m4
     %2:_(<vscale x 4 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv8s64
-body:   |
+name:            test_nxv8i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    %1:_(<vscale x 8 x s64>) = COPY $v16m8
     %2:_(<vscale x 8 x s64>) = G_AND %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
 
 ...
-
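
The register assignments in these regenerated tests follow the RVV register-grouping rule: a scalable type whose known-minimum size exceeds one 64-bit vector register block occupies an LMUL>1 register group, so the two operands arrive in aligned groups such as $v8m2/$v10m2 or $v8m8/$v16m8 rather than in $v8/$v9. Below is a minimal standalone C++ sketch of that mapping — not code from this patch — assuming LLVM's RISCV::RVVBitsPerBlock of 64 and the v8/v(8+LMUL) argument layout visible in the tests above; lmulFor is a hypothetical helper introduced only for illustration.

#include <cstdio>

// Hypothetical helper (not part of this patch): compute the LMUL register
// group a RISC-V scalable vector type occupies, assuming LLVM's
// RISCV::RVVBitsPerBlock of 64 bits per single vector register.
static unsigned lmulFor(unsigned MinElts, unsigned EltBits) {
  unsigned KnownMinBits = MinElts * EltBits;
  // Fractional and whole single-register types (nxv1s8 .. nxv1s64) use LMUL=1.
  return KnownMinBits <= 64 ? 1 : KnownMinBits / 64;
}

int main() {
  struct { unsigned Elts, Bits; } Types[] = {
      {1, 8}, {16, 8}, {64, 8}, {8, 16}, {4, 32}, {2, 64}, {8, 64}};
  for (const auto &T : Types) {
    unsigned LMul = lmulFor(T.Elts, T.Bits);
    if (LMul == 1) // single registers: the first two arguments use $v8, $v9
      std::printf("nxv%us%u: $v8, $v9\n", T.Elts, T.Bits);
    else           // grouped: $v8m<k>, then the next aligned group $v<8+k>m<k>
      std::printf("nxv%us%u: $v8m%u, $v%um%u\n", T.Elts, T.Bits, LMul,
                  8 + LMul, LMul);
  }
  return 0;
}

For example, nxv16s8 has a known-minimum size of 128 bits, giving LMUL=2 and the $v8m2/$v10m2 pair seen in test_nxv16i8; nxv8s64 gives LMUL=8 and the $v8m8/$v16m8 pair seen in test_nxv8i64.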
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
index 98180b6715716e..22c2258f2b920c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-or.mir
@@ -2,329 +2,398 @@
 # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name:  test_nxv1s8
-body:   |
+name:            test_nxv1i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
+
+    ; CHECK-LABEL: name: test_nxv1i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv2s8
-body:   |
+name:            test_nxv2i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: test_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s8
-body:   |
+name:            test_nxv4i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: test_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s8
-body:   |
+name:            test_nxv8i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: test_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv16s8
-body:   |
+name:            test_nxv16i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv32s8
-body:   |
+name:            test_nxv32i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv64s8
-body:   |
+name:            test_nxv64i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s16
-body:   |
+name:            test_nxv1i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
+
+    ; CHECK-LABEL: name: test_nxv1i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s16
-body:   |
+name:            test_nxv2i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: test_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s16
-body:   |
+name:            test_nxv4i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: test_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s16
-body:   |
+name:            test_nxv8i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv16s16
-body:   |
+name:            test_nxv16i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv32s16
-body:   |
+name:            test_nxv32i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s32
-body:   |
+name:            test_nxv1i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
+
+    ; CHECK-LABEL: name: test_nxv1i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s32
-body:   |
+name:            test_nxv2i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: test_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s32
-body:   |
+name:            test_nxv4i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv8s32
-body:   |
+name:            test_nxv8i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv16s32
-body:   |
+name:            test_nxv16i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s64
-body:   |
+name:            test_nxv1i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
+
+    ; CHECK-LABEL: name: test_nxv1i64
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s64
-body:   |
+name:            test_nxv2i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv2i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    %1:_(<vscale x 2 x s64>) = COPY $v10m2
     %2:_(<vscale x 2 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv4s64
-body:   |
+name:            test_nxv4i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %1:_(<vscale x 4 x s64>) = COPY $v12m4
     %2:_(<vscale x 4 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv8s64
-body:   |
+name:            test_nxv8i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    %1:_(<vscale x 8 x s64>) = COPY $v16m8
     %2:_(<vscale x 8 x s64>) = G_OR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
 
 ...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
index deee01fbb1512d..eb961b8aa30349 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sub.mir
@@ -2,329 +2,398 @@
 # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name:  test_nxv1s8
-body:   |
+name:            test_nxv1i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
+
+    ; CHECK-LABEL: name: test_nxv1i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv2s8
-body:   |
+name:            test_nxv2i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: test_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s8
-body:   |
+name:            test_nxv4i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: test_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s8
-body:   |
+name:            test_nxv8i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: test_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv16s8
-body:   |
+name:            test_nxv16i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv32s8
-body:   |
+name:            test_nxv32i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv64s8
-body:   |
+name:            test_nxv64i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s16
-body:   |
+name:            test_nxv1i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
+
+    ; CHECK-LABEL: name: test_nxv1i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s16
-body:   |
+name:            test_nxv2i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: test_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s16
-body:   |
+name:            test_nxv4i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: test_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s16
-body:   |
+name:            test_nxv8i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv16s16
-body:   |
+name:            test_nxv16i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv32s16
-body:   |
+name:            test_nxv32i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s32
-body:   |
+name:            test_nxv1i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
+
+    ; CHECK-LABEL: name: test_nxv1i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s32
-body:   |
+name:            test_nxv2i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: test_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s32
-body:   |
+name:            test_nxv4i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv8s32
-body:   |
+name:            test_nxv8i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv16s32
-body:   |
+name:            test_nxv16i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s64
-body:   |
+name:            test_nxv1i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
+
+    ; CHECK-LABEL: name: test_nxv1i64
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s64
-body:   |
+name:            test_nxv2i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv2i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    %1:_(<vscale x 2 x s64>) = COPY $v10m2
     %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv4s64
-body:   |
+name:            test_nxv4i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %1:_(<vscale x 4 x s64>) = COPY $v12m4
     %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv8s64
-body:   |
+name:            test_nxv8i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    %1:_(<vscale x 8 x s64>) = COPY $v16m8
     %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
 
 ...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
index 1695f845b0f525..4de02b1a04da48 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-xor.mir
@@ -1,330 +1,399 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
-# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name:  test_nxv1s8
-body:   |
+name:            test_nxv1i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s8
+
+    ; CHECK-LABEL: name: test_nxv1i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv2s8
-body:   |
+name:            test_nxv2i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: test_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s8
-body:   |
+name:            test_nxv4i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: test_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s8
-body:   |
+name:            test_nxv8i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: test_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv16s8
-body:   |
+name:            test_nxv16i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv32s8
-body:   |
+name:            test_nxv32i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv64s8
-body:   |
+name:            test_nxv64i8
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s16
-body:   |
+name:            test_nxv1i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s16
+
+    ; CHECK-LABEL: name: test_nxv1i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s16
-body:   |
+name:            test_nxv2i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: test_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s16
-body:   |
+name:            test_nxv4i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: test_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv8s16
-body:   |
+name:            test_nxv8i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv16s16
-body:   |
+name:            test_nxv16i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv32s16
-body:   |
+name:            test_nxv32i16
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s32
-body:   |
+name:            test_nxv1i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s32
+
+    ; CHECK-LABEL: name: test_nxv1i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s32
-body:   |
+name:            test_nxv2i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: test_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
 
 ...
 ---
-name:  test_nxv4s32
-body:   |
+name:            test_nxv4i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv8s32
-body:   |
+name:            test_nxv8i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv16s32
-body:   |
+name:            test_nxv16i32
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
 
 ...
 ---
-name:  test_nxv1s64
-body:   |
+name:            test_nxv1i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv1s64
+
+    ; CHECK-LABEL: name: test_nxv1i64
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name:  test_nxv2s64
-body:   |
+name:            test_nxv2i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
-    %0:_(<vscale x 2 x s64>) = COPY $v8
-    %1:_(<vscale x 2 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv2i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 2 x s64>) = COPY $v8m2
+    %1:_(<vscale x 2 x s64>) = COPY $v10m2
     %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
 
 ...
 ---
-name:  test_nxv4s64
-body:   |
+name:            test_nxv4i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
-    %0:_(<vscale x 4 x s64>) = COPY $v8
-    %1:_(<vscale x 4 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %1:_(<vscale x 4 x s64>) = COPY $v12m4
     %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
 
 ...
 ---
-name:  test_nxv8s64
-body:   |
+name:            test_nxv8i64
+body:             |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
-    %0:_(<vscale x 8 x s64>) = COPY $v8
-    %1:_(<vscale x 8 x s64>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 8 x s64>) = COPY $v8m8
+    %1:_(<vscale x 8 x s64>) = COPY $v16m8
     %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
 
 ...
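A note on what these regenerated checks exercise: the test operands now use the RISC-V vector register groups that match each type's LMUL (LMUL <= 1 types in $v8/$v9, LMUL 2 in $v8m2/$v10m2, LMUL 4 in $v8m4/$v12m4, LMUL 8 in $v8m8/$v16m8). For readers who want the shape of the legalizer rule these tests rely on, a minimal sketch, written as it might appear inside the RISCVLegalizerInfo constructor, follows. The sXLen scalar rule and the Zve64x predicate are assumptions made for the sketch, not a quote of this patch:

    // Sketch only: mark G_ADD/G_SUB/G_AND/G_OR/G_XOR legal for the scalable
    // vector LLTs declared earlier (nxv1s8 through nxv8s64). Gating s64
    // elements on Zve64x is an assumed condition, not this patch's exact code.
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
        .legalFor({s32, sXLen})
        .legalIf(LegalityPredicates::all(
            LegalityPredicates::typeInSet(
                0, {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8,
                    nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16,
                    nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                    nxv1s64, nxv2s64, nxv4s64, nxv8s64}),
            [=, &ST](const LegalityQuery &Query) {
              // Vector ops need the V/Zve* extensions; s64 element types
              // additionally need Zve64x.
              return ST.hasVInstructions() &&
                     (Query.Types[0].getScalarSizeInBits() != 64 ||
                      ST.hasVInstructionsI64());
            }));

With a rule like this in place, the legalizer leaves these operations untouched, which is why every CHECK block above is just the two input COPYs and the G_* instruction; the $v8m2/$v10m2-style physical registers come from the hand-written test MIR, not from legalization.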


