[llvm] [RISCV][GlobalISel] Legalize G_ADD, G_SUB, G_AND, G_OR, G_XOR on RISC-V Vector Extension (PR #71400)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 13 09:47:13 PST 2023


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/71400

>From becfeb72192cbb73818ed925b1a27fae2cafbc61 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Thu, 9 Nov 2023 09:54:22 -0500
Subject: [PATCH 1/7] [RISCV][GISEL] Add support for lowerFormalArguments that
 contain scalable vector types
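
One effect of the LowLevelType.cpp hunk below worth spelling out: LLT(MVT)
now treats every scalable MVT as a vector, even when the minimum element
count is 1. A minimal sketch of the resulting mapping (illustrative only,
not part of the patch; the function name is made up):

  #include "llvm/CodeGen/LowLevelType.h"
  #include "llvm/CodeGen/MachineValueType.h"
  #include <cassert>
  using namespace llvm;

  void lltFromMVTExample() {
    // nxv1i8 has a minimum of one element but is scalable, so it must
    // stay a vector: <vscale x 1 x s8>. Previously it decayed to s8.
    LLT Scalable(MVT::nxv1i8);
    assert(Scalable.isVector() && Scalable.isScalable());
    // A fixed-length v1i8 still decays to the scalar s8, as before.
    LLT Fixed(MVT::v1i8);
    assert(Fixed.isScalar());
  }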

---
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp  |   3 +-
 .../CodeGen/GlobalISel/MachineIRBuilder.cpp   |   6 +-
 llvm/lib/CodeGen/LowLevelType.cpp             |   2 +-
 llvm/lib/CodeGen/MachineVerifier.cpp          |  18 +-
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  |  37 +-
 .../RISCV/GlobalISel/irtranslator/fallback.ll |   2 +-
 .../irtranslator/vec-args-bf16-err.ll         |  16 +
 .../irtranslator/vec-args-f16-err.ll          |  16 +
 .../RISCV/GlobalISel/irtranslator/vec-args.ll | 909 ++++++++++++++++++
 llvm/test/MachineVerifier/copy-scalable.mir   |  43 +-
 10 files changed, 991 insertions(+), 61 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll

diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 975787a8f5e734f..2527b1431289677 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -358,7 +358,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
   if (PartLLT.isVector() == LLTy.isVector() &&
       PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
       (!PartLLT.isVector() ||
-       PartLLT.getNumElements() == LLTy.getNumElements()) &&
+       PartLLT.getElementCount() == LLTy.getElementCount()) &&
       OrigRegs.size() == 1 && Regs.size() == 1) {
     Register SrcReg = Regs[0];
 
@@ -406,6 +406,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     // If PartLLT is a mismatched vector in both number of elements and element
     // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
     // have the same elt type, i.e. v4s32.
+    // TODO: Extend this coercion to element multiples other than just 2.
     if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
         PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
         Regs.size() == 1) {
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 5b4e2b725e1dd76..80e9c08e850b683 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1065,16 +1065,16 @@ void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
 #ifndef NDEBUG
   if (DstTy.isVector()) {
     assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
-    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
+    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
            "different number of elements in a trunc/ext");
   } else
     assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
 
   if (IsExtend)
-    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
+    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
            "invalid narrowing extend");
   else
-    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
+    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
            "invalid widening trunc");
 #endif
 }
diff --git a/llvm/lib/CodeGen/LowLevelType.cpp b/llvm/lib/CodeGen/LowLevelType.cpp
index 24c30b756737b20..cd85bf606989f9e 100644
--- a/llvm/lib/CodeGen/LowLevelType.cpp
+++ b/llvm/lib/CodeGen/LowLevelType.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
 
 LLT::LLT(MVT VT) {
   if (VT.isVector()) {
-    bool asVector = VT.getVectorMinNumElements() > 1;
+    bool asVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
     init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
          VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
          /*AddressSpace=*/0);
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index da1d9c6f0679c7f..f9a55e4b6c66ce2 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -965,7 +965,7 @@ bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
     return false;
   }
 
-  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
+  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
     report("operand types must preserve number of vector elements", MI);
     return false;
   }
@@ -1953,18 +1953,12 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
         DstSize = TRI->getRegSizeInBits(*DstRC);
     }
 
-    // The next two checks allow COPY between physical and virtual registers,
-    // when the virtual register has a scalable size and the physical register
-    // has a fixed size. These checks allow COPY between *potentially* mismatched
-    // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
-    // be able to resolve a fixed size for the scalable vector, and at that
-    // point this function will know for sure whether the sizes are mismatched
-    // and correctly report a size mismatch.
+    // If this is a copy from physical register to virtual register, and if the
+    // Dst is scalable and the Src is fixed, then the Dst can only hold the Src
+    // if the minimum size Dst can hold is at least as big as Src.
     if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
-        !SrcSize.isScalable())
-      break;
-    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
-        !DstSize.isScalable())
+        !SrcSize.isScalable() &&
+        DstSize.getKnownMinValue() <= SrcSize.getFixedValue())
       break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index a1dbc21ca364666..e73d8863963d0b2 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -14,6 +14,7 @@
 
 #include "RISCVCallLowering.h"
 #include "RISCVISelLowering.h"
+#include "RISCVMachineFunctionInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -185,6 +186,9 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
     const DataLayout &DL = MF.getDataLayout();
     const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
 
+    if (LocVT.isScalableVector())
+      MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
+
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
                       *Subtarget.getTargetLowering(),
@@ -301,8 +305,31 @@ struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+/// Return true if a scalable vector of EltTy is legal for lowering.
+static bool isLegalElementTypeForRVV(Type *EltTy,
+                                     const RISCVSubtarget &Subtarget) {
+  if (EltTy->isPointerTy())
+    return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
+  if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
+      EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
+    return true;
+  if (EltTy->isIntegerTy(64))
+    return Subtarget.hasVInstructionsI64();
+  if (EltTy->isHalfTy())
+    return Subtarget.hasVInstructionsF16();
+  if (EltTy->isBFloatTy())
+    return Subtarget.hasVInstructionsBF16();
+  if (EltTy->isFloatTy())
+    return Subtarget.hasVInstructionsF32();
+  if (EltTy->isDoubleTy())
+    return Subtarget.hasVInstructionsF64();
+  return false;
+}
+
 // TODO: Support all argument types.
-static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget) {
+// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
+static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
+                                    bool IsLowerArgs = false) {
   // TODO: Integers larger than 2*XLen are passed indirectly which is not
   // supported yet.
   if (T->isIntegerTy())
@@ -311,6 +338,11 @@ static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget) {
     return true;
   if (T->isPointerTy())
     return true;
+  // TODO: Support fixed vector types.
+  if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
+      T->isScalableTy() &&
+      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
+    return true;
   return false;
 }
 
@@ -398,7 +430,8 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   const RISCVSubtarget &Subtarget =
       MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
   for (auto &Arg : F.args()) {
-    if (!isSupportedArgumentType(Arg.getType(), Subtarget))
+    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
+                                 /*IsLowerArgs=*/true))
       return false;
   }
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
index a3a913d8ce02d83..2ad068eb7dc3d58 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   i64)
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments{{.*}}scalable_arg
+; FALLBACK-WITH-REPORT-ERR: <unknown>:0:0: unable to translate instruction: call:
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg
 define <vscale x 1 x i8> @scalable_arg(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll
new file mode 100644
index 000000000000000..f39e7793e5d4f31
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for bf16 vectors. If the compiler did not throw an error,
+; then it will try to scalarize the argument to an s32, which may drop elements.
+define void @test_args_nxv1bf16(<vscale x 1 x bfloat> %a) {
+entry:
+  ret void
+}
+
+; CHECK: LLVM ERROR: unable to lower arguments: ptr (in function: test_args_nxv1bf16)
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll
new file mode 100644
index 000000000000000..042b455bfb54754
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for f16 vectors. If the compiler did not throw an error,
+; then it will try to scalarize the argument to an s32, which may drop elements.
+define void @test_args_nxv1f16(<vscale x 1 x half> %a) {
+entry:
+  ret void
+}
+
+; CHECK: LLVM ERROR: unable to lower arguments: ptr (in function: test_args_nxv1f16)
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
new file mode 100644
index 000000000000000..4df0a8f48cc8d0b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
@@ -0,0 +1,909 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+; ==========================================================================
+; ============================= Scalable Types =============================
+; ==========================================================================
+
+define void @test_args_nxv1i8(<vscale x 1 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i8(<vscale x 2 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i8(<vscale x 4 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i8(<vscale x 8 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i8(<vscale x 16 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i8(<vscale x 32 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv64i8(<vscale x 64 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv64i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv64i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i16(<vscale x 1 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i16(<vscale x 2 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i16(<vscale x 4 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i16(<vscale x 8 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i16(<vscale x 16 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i16(<vscale x 32 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i32(<vscale x 1 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i32(<vscale x 2 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i32(<vscale x 4 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i32(<vscale x 8 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i32(<vscale x 16 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i64(<vscale x 1 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i64(<vscale x 2 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i64(<vscale x 4 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i64(<vscale x 8 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv64i1(<vscale x 64 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv64i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv64i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i1(<vscale x 32 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i1(<vscale x 16 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i1(<vscale x 8 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i1(<vscale x 4 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i1(<vscale x 2 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i1(<vscale x 1 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f32(<vscale x 1 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f32(<vscale x 2 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f32(<vscale x 4 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f32(<vscale x 8 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16f32(<vscale x 16 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv16f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f64(<vscale x 1 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f64(<vscale x 2 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f64(<vscale x 4 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f64(<vscale x 8 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f16(<vscale x 1 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f16(<vscale x 2 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f16(<vscale x 4 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f16(<vscale x 8 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16f16(<vscale x 16 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv16f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32f16(<vscale x 32 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv32f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1b16(<vscale x 1 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv1b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2b16(<vscale x 2 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv2b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4b16(<vscale x 4 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv4b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8b16(<vscale x 8 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv8b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16b16(<vscale x 16 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv16b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32b16(<vscale x 32 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv32b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
diff --git a/llvm/test/MachineVerifier/copy-scalable.mir b/llvm/test/MachineVerifier/copy-scalable.mir
index 28d3e712455012d..f4088f7aed34dde 100644
--- a/llvm/test/MachineVerifier/copy-scalable.mir
+++ b/llvm/test/MachineVerifier/copy-scalable.mir
@@ -3,7 +3,7 @@
 # REQUIRES: riscv64-registered-target
 
 ---
-name:            test_copy_physical_to_virtual_nxv1s8
+name:            test_copy_fixed_to_scalable
 legalized:       true
 regBankSelected: false
 selected:        false
@@ -15,48 +15,9 @@ body:             |
   bb.0:
     liveins: $v8
 
-    ; CHECK-LABEL: name: test_copy_physical_to_virtual_nxv1s8
+    ; CHECK-LABEL: name: test_copy_fixed_to_scalable
     ; CHECK: liveins: $v8
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
 ...
-
----
-name:            test_copy_physical_to_virtual_nxv16s8
-legalized:         true
-tracksRegLiveness: true
-body:             |
-  bb.1.entry:
-    liveins: $v8
-    ; CHECK-LABEL: name: test_copy_physical_to_virtual_nxv16s8
-    ; CHECK: liveins: $v8
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-
-...
-
----
-name:            test_copy_virtual_to_physical
-legalized:       true
-regBankSelected: false
-selected:        false
-tracksRegLiveness: true
-registers:
-  - { id: 0, class: _, preferred-register: '' }
-liveins:
-body:             |
-  bb.0:
-    liveins: $v8
-
-    ; CHECK-LABEL: name: test_copy_virtual_to_physical
-    ; CHECK: liveins: $v8
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = IMPLICIT_DEF
-    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
-    ; CHECK-NEXT: PseudoRET implicit $v8
-    %0:_(<vscale x 1 x s8>) = IMPLICIT_DEF
-    $v8 = COPY %0(<vscale x 1 x s8>)
-    PseudoRET implicit $v8
-...

>From b2a0e2903aae34bce020272f9bf49f7140d8e2bd Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 7 Nov 2023 14:37:28 -0800
Subject: [PATCH 2/7] [MachineVerifier] Fix COPY check in MachineVerifier for
 scalable vectors

This change fixes #71518, which compared the KnownMinValue of the
scalable virtual register with the FixedSize of the physical register
in the wrong direction. It turns out that we cannot include this check
at all, since it leads to false failures. Test cases are added to show
that the false failures no longer occur after this fix.
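
To make the direction problem concrete, here is a minimal sketch of the
reverted condition (illustrative only, not the in-tree MachineVerifier
code; the helper name is made up):

  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  static bool revertedCheckAccepts(TypeSize DstSize, TypeSize SrcSize) {
    // A scalable Dst copied from a fixed Src was accepted only when the
    // scalable side's *known minimum* size fit inside the fixed size.
    return DstSize.isScalable() && !SrcSize.isScalable() &&
           DstSize.getKnownMinValue() <= SrcSize.getFixedValue();
  }

For the new test_copy_physical_to_virtual_nxv16s8 case the virtual
register's known minimum (128 bits) exceeds the fixed size reported for
the physical register, so the copy was rejected even though it is fine
for a large enough vscale. Because vscale is unknown at verification
time, no fixed comparison can decide such copies, which is why the check
is dropped rather than flipped.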
---
 llvm/lib/CodeGen/MachineVerifier.cpp        | 16 +++++---
 llvm/test/MachineVerifier/copy-scalable.mir | 43 ++++++++++++++++++++-
 2 files changed, 52 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index f9a55e4b6c66ce2..6107fa5c43c57f9 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1953,12 +1953,18 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
         DstSize = TRI->getRegSizeInBits(*DstRC);
     }
 
-    // If this is a copy from physical register to virtual register, and if the
-    // Dst is scalable and the Src is fixed, then the Dst can only hold the Src
-    // if the minimum size Dst can hold is at least as big as Src.
+    // The next two checks allow COPY between physical and virtual registers,
+    // when the virtual register has a scalable size and the physical register
+    // has a fixed size. These checks allow COPY between *potentially* mismatched
+    // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
+    // be able to resolve a fixed size for the scalable vector, and at that
+    // point this function will know for sure whether the sizes are mismatched
+    // and correctly report a size mismatch.
     if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
-        !SrcSize.isScalable() &&
-        DstSize.getKnownMinValue() <= SrcSize.getFixedValue())
+        !SrcSize.isScalable())
+      break;
+    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
+        !DstSize.isScalable())
       break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
diff --git a/llvm/test/MachineVerifier/copy-scalable.mir b/llvm/test/MachineVerifier/copy-scalable.mir
index f4088f7aed34dde..28d3e712455012d 100644
--- a/llvm/test/MachineVerifier/copy-scalable.mir
+++ b/llvm/test/MachineVerifier/copy-scalable.mir
@@ -3,7 +3,7 @@
 # REQUIRES: riscv64-registered-target
 
 ---
-name:            test_copy_fixed_to_scalable
+name:            test_copy_physical_to_virtual_nxv1s8
 legalized:       true
 regBankSelected: false
 selected:        false
@@ -15,9 +15,48 @@ body:             |
   bb.0:
     liveins: $v8
 
-    ; CHECK-LABEL: name: test_copy_fixed_to_scalable
+    ; CHECK-LABEL: name: test_copy_physical_to_virtual_nxv1s8
     ; CHECK: liveins: $v8
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
 ...
+
+---
+name:            test_copy_physical_to_virtual_nxv16s8
+legalized:         true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $v8
+    ; CHECK-LABEL: name: test_copy_physical_to_virtual_nxv16s8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+
+...
+
+---
+name:            test_copy_virtual_to_physical
+legalized:       true
+regBankSelected: false
+selected:        false
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: _, preferred-register: '' }
+liveins:
+body:             |
+  bb.0:
+    liveins: $v8
+
+    ; CHECK-LABEL: name: test_copy_virtual_to_physical
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 1 x s8>) = IMPLICIT_DEF
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...

>From ffb8e9fbcbfdd4e0371bd8b114736023f9b85096 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 8 Nov 2023 19:22:21 -0500
Subject: [PATCH 3/7] [RISCV][GISEL] Legalize G_ADD, G_SUB, G_AND, G_OR, G_XOR;
 G_ADD legalized
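
With these entries in the legalFor list, the legalizer reports G_ADD and
friends on the listed scalable vector types as Legal. A quick sketch of
what that means in LegalizerInfo terms (illustrative only; the function
name is made up, and LI stands for the RISC-V LegalizerInfo instance):

  #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
  #include "llvm/CodeGen/TargetOpcodes.h"
  #include <cassert>
  using namespace llvm;

  void checkScalableAddIsLegal(const LegalizerInfo &LI) {
    // G_ADD uses one type index shared by the result and both sources,
    // so a single LLT describes the whole query.
    const LLT Types[] = {LLT::scalable_vector(2, 32)};
    LegalityQuery Q(TargetOpcode::G_ADD, Types);
    assert(LI.getAction(Q).Action == LegalizeActions::Legal);
  }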

---
 llvm/lib/CodeGen/MachineVerifier.cpp          |   8 +
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  31 +-
 .../legalizer/rv32/legalize-add.mir           | 353 +++++++++++++++--
 .../legalizer/rv64/legalize-add.mir           | 366 ++++++++++++++++--
 4 files changed, 707 insertions(+), 51 deletions(-)

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 6107fa5c43c57f9..36c4f14938feb16 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1946,6 +1946,9 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
         SrcSize = TRI->getRegSizeInBits(*SrcRC);
     }
 
+    if (SrcSize.isZero())
+      SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
+
     if (DstReg.isPhysical() && SrcTy.isValid()) {
       const TargetRegisterClass *DstRC =
           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
@@ -1966,6 +1969,11 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
         !DstSize.isScalable())
       break;
+    // If the Src is scalable and the Dst is fixed, then the Dst can only
+    // hold the Src if the Src is known to fit in the Dst.
+    if (SrcSize.isScalable() && !DstSize.isScalable() &&
+        TypeSize::isKnownLE(DstSize, SrcSize))
+      break;
 
     if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 5acc933438f6bea..1ddee5a2d6f6c0c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -33,10 +33,39 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
   const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
 
+  const LLT nxv1s8 = LLT::scalable_vector(1, s8);
+  const LLT nxv2s8 = LLT::scalable_vector(2, s8);
+  const LLT nxv4s8 = LLT::scalable_vector(4, s8);
+  const LLT nxv8s8 = LLT::scalable_vector(8, s8);
+  const LLT nxv16s8 = LLT::scalable_vector(16, s8);
+  const LLT nxv32s8 = LLT::scalable_vector(32, s8);
+  const LLT nxv64s8 = LLT::scalable_vector(64, s8);
+
+  const LLT nxv1s16 = LLT::scalable_vector(1, s16);
+  const LLT nxv2s16 = LLT::scalable_vector(2, s16);
+  const LLT nxv4s16 = LLT::scalable_vector(4, s16);
+  const LLT nxv8s16 = LLT::scalable_vector(8, s16);
+  const LLT nxv16s16 = LLT::scalable_vector(16, s16);
+  const LLT nxv32s16 = LLT::scalable_vector(32, s16);
+
+  const LLT nxv1s32 = LLT::scalable_vector(1, s32);
+  const LLT nxv2s32 = LLT::scalable_vector(2, s32);
+  const LLT nxv4s32 = LLT::scalable_vector(4, s32);
+  const LLT nxv8s32 = LLT::scalable_vector(8, s32);
+  const LLT nxv16s32 = LLT::scalable_vector(16, s32);
+
+  const LLT nxv1s64 = LLT::scalable_vector(1, s64);
+  const LLT nxv2s64 = LLT::scalable_vector(2, s64);
+  const LLT nxv4s64 = LLT::scalable_vector(4, s64);
+  const LLT nxv8s64 = LLT::scalable_vector(8, s64);
+
   using namespace TargetOpcode;
 
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
-      .legalFor({s32, sXLen})
+      .legalFor({s32, sXLen, nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
+                 nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
+                 nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
+                 nxv1s64, nxv2s64, nxv4s64, nxv8s64})
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, sXLen);
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
index d169eb316dfcb7a..2c63b92c91b4f36 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
@@ -142,29 +142,30 @@ body:             |
 ---
 name:            add_i96
 body:             |
+  ; CHECK-LABEL: name: add_i96
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   %lo1:_(s32) = COPY $x10
+  ; CHECK-NEXT:   %mid1:_(s32) = COPY $x11
+  ; CHECK-NEXT:   %hi1:_(s32) = COPY $x12
+  ; CHECK-NEXT:   %lo2:_(s32) = COPY $x13
+  ; CHECK-NEXT:   %mid2:_(s32) = COPY $x14
+  ; CHECK-NEXT:   %hi2:_(s32) = COPY $x15
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
+  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
+  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
+  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
+  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
+  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s32)
+  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s32)
+  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
-    ; CHECK-LABEL: name: add_i96
-    ; CHECK: %lo1:_(s32) = COPY $x10
-    ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
-    ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
-    ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
-    ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
-    ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
-    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
-    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
-    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
-    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
-    ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
-    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
-    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s32) = COPY $x10
     %mid1:_(s32) = COPY $x11
     %hi1:_(s32) = COPY $x12
@@ -181,3 +182,311 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  ; CHECK-LABEL: name: test_nxv64s8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+  bb.0.entry:
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  ; CHECK-LABEL: name: test_nxv32s16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+  bb.0.entry:
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s32
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  ; CHECK-LABEL: name: test_nxv16s32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+  bb.0.entry:
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s64
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
index f394e4d5064edc5..b4eefb7354511a2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
@@ -174,35 +174,36 @@ body:             |
 ---
 name:            add_i192
 body:             |
+  ; CHECK-LABEL: name: add_i192
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   %lo1:_(s64) = COPY $x10
+  ; CHECK-NEXT:   %mid1:_(s64) = COPY $x11
+  ; CHECK-NEXT:   %hi1:_(s64) = COPY $x12
+  ; CHECK-NEXT:   %lo2:_(s64) = COPY $x13
+  ; CHECK-NEXT:   %mid2:_(s64) = COPY $x14
+  ; CHECK-NEXT:   %hi2:_(s64) = COPY $x15
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
+  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
+  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
+  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
+  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
+  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
+  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s64)
+  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s64)
+  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
-    ; CHECK-LABEL: name: add_i192
-    ; CHECK: %lo1:_(s64) = COPY $x10
-    ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
-    ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
-    ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
-    ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
-    ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
-    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
-    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
-    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
-    ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
-    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
-    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s64) = COPY $x10
     %mid1:_(s64) = COPY $x11
     %hi1:_(s64) = COPY $x12
@@ -219,3 +220,312 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:		test_nxv1s8
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  ; CHECK-LABEL: name: test_nxv64s8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
+  bb.0.entry:
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s16
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  ; CHECK-LABEL: name: test_nxv32s16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
+  bb.0.entry:
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s32
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  ; CHECK-LABEL: name: test_nxv16s32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
+  bb.0.entry:
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:		test_nxv1s64
+body:		 |
+  bb.0.entry:
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
+    PseudoRET implicit %2
+

>From f254a97ce38ea237c7090cadb21ca3dd0b6ea25d Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Thu, 9 Nov 2023 11:18:42 -0500
Subject: [PATCH 4/7] Update G_ADD MIR test cases

---
 .../legalizer/rv32/legalize-add.mir           | 54 ++++++++-------
 .../legalizer/rv64/legalize-add.mir           | 66 ++++++++++---------
 2 files changed, 64 insertions(+), 56 deletions(-)
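
Note: these .mir tests use assertions autogenerated by
llvm/utils/update_mir_test_checks.py, so the CHECK-line movement in the
hunks below is what rerunning the updater produces after the test bodies
change, e.g.:

  llvm/utils/update_mir_test_checks.py \
      llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir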

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
index 2c63b92c91b4f36..67d187e61106b17 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
@@ -142,30 +142,29 @@ body:             |
 ---
 name:            add_i96
 body:             |
-  ; CHECK-LABEL: name: add_i96
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   %lo1:_(s32) = COPY $x10
-  ; CHECK-NEXT:   %mid1:_(s32) = COPY $x11
-  ; CHECK-NEXT:   %hi1:_(s32) = COPY $x12
-  ; CHECK-NEXT:   %lo2:_(s32) = COPY $x13
-  ; CHECK-NEXT:   %mid2:_(s32) = COPY $x14
-  ; CHECK-NEXT:   %hi2:_(s32) = COPY $x15
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
-  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
-  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
-  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
-  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
-  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
-  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
-  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
-  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s32)
-  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s32)
-  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s32)
-  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
+    ; CHECK-LABEL: name: add_i96
+    ; CHECK: %lo1:_(s32) = COPY $x10
+    ; CHECK-NEXT: %mid1:_(s32) = COPY $x11
+    ; CHECK-NEXT: %hi1:_(s32) = COPY $x12
+    ; CHECK-NEXT: %lo2:_(s32) = COPY $x13
+    ; CHECK-NEXT: %mid2:_(s32) = COPY $x14
+    ; CHECK-NEXT: %hi2:_(s32) = COPY $x15
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
+    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
+    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
+    ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s32) = COPY $x10
     %mid1:_(s32) = COPY $x11
     %hi1:_(s32) = COPY $x12
@@ -183,9 +182,14 @@ body:             |
 
 ...
 ---
-name:		test_nxv1s8
-body:		 |
+name:  test_nxv1s8
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
index b4eefb7354511a2..2c06276ec2ab732 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
@@ -174,36 +174,35 @@ body:             |
 ---
 name:            add_i192
 body:             |
-  ; CHECK-LABEL: name: add_i192
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   %lo1:_(s64) = COPY $x10
-  ; CHECK-NEXT:   %mid1:_(s64) = COPY $x11
-  ; CHECK-NEXT:   %hi1:_(s64) = COPY $x12
-  ; CHECK-NEXT:   %lo2:_(s64) = COPY $x13
-  ; CHECK-NEXT:   %mid2:_(s64) = COPY $x14
-  ; CHECK-NEXT:   %hi2:_(s64) = COPY $x15
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
-  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
-  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
-  ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
-  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
-  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
-  ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
-  ; CHECK-NEXT:   [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
-  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-  ; CHECK-NEXT:   [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
-  ; CHECK-NEXT:   $x10 = COPY [[ADD]](s64)
-  ; CHECK-NEXT:   $x11 = COPY [[ADD2]](s64)
-  ; CHECK-NEXT:   $x12 = COPY [[ADD4]](s64)
-  ; CHECK-NEXT:   PseudoRET implicit $x10, implicit $x11, implicit $x12
   bb.0.entry:
+    ; CHECK-LABEL: name: add_i192
+    ; CHECK: %lo1:_(s64) = COPY $x10
+    ; CHECK-NEXT: %mid1:_(s64) = COPY $x11
+    ; CHECK-NEXT: %hi1:_(s64) = COPY $x12
+    ; CHECK-NEXT: %lo2:_(s64) = COPY $x13
+    ; CHECK-NEXT: %mid2:_(s64) = COPY $x14
+    ; CHECK-NEXT: %hi2:_(s64) = COPY $x15
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
+    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[ADD2]](s64), [[C]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
+    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
+    ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+    ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+    ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %lo1:_(s64) = COPY $x10
     %mid1:_(s64) = COPY $x11
     %hi1:_(s64) = COPY $x12
@@ -221,9 +220,14 @@ body:             |
 
 ...
 ---
-name:		test_nxv1s8
-body:		 |
+name:  test_nxv1s8
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s8>)
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_ADD %0, %1

>From e2f73276405204b52737aead9b8f391bccaf67b2 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Thu, 9 Nov 2023 14:06:15 -0500
Subject: [PATCH 5/7] Add test cases for G_SUB, G_AND, G_OR, and G_XOR

---
 .../legalizer/rv32/legalize-add.mir           |  62 ++--
 .../legalizer/rv32/legalize-and.mir           | 328 ++++++++++++++++++
 .../GlobalISel/legalizer/rv32/legalize-or.mir | 327 +++++++++++++++++
 .../legalizer/rv32/legalize-sub.mir           | 327 +++++++++++++++++
 .../legalizer/rv32/legalize-xor.mir           | 327 +++++++++++++++++
 .../legalizer/rv64/legalize-add.mir           |  61 ++--
 .../legalizer/rv64/legalize-and.mir           | 327 +++++++++++++++++
 .../GlobalISel/legalizer/rv64/legalize-or.mir | 327 +++++++++++++++++
 .../legalizer/rv64/legalize-sub.mir           | 328 ++++++++++++++++++
 .../legalizer/rv64/legalize-xor.mir           | 327 +++++++++++++++++
 10 files changed, 2693 insertions(+), 48 deletions(-)
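
For context, the new tests below exercise the legalizer rule this PR adds
for RVV scalable vector types. A minimal sketch of such a rule follows,
assuming the usual LegalizerInfo API; it is illustrative only (the
IntBinOps name is invented here, and the PR's actual
RISCVLegalizerInfo.cpp change may be structured differently):

  // Illustrative sketch, inside RISCVLegalizerInfo's constructor.
  // s32 and sXLen are the scalar LLTs already defined in that file,
  // and ST is the RISCVSubtarget passed to the constructor.
  const LLT nxv1s8 = LLT::scalable_vector(1, 8);
  const LLT nxv2s8 = LLT::scalable_vector(2, 8);
  const LLT nxv4s8 = LLT::scalable_vector(4, 8);
  const LLT nxv8s8 = LLT::scalable_vector(8, 8);
  const LLT nxv16s8 = LLT::scalable_vector(16, 8);
  const LLT nxv32s8 = LLT::scalable_vector(32, 8);
  const LLT nxv64s8 = LLT::scalable_vector(64, 8);
  // ...the s16/s32/s64 element widths tested below follow the same pattern.

  auto &IntBinOps =
      getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
          .legalFor({s32, sXLen});
  // Only advertise vector types when the V extension is present, so
  // non-RVV subtargets keep their existing scalar legalization.
  if (ST.hasVInstructions())
    IntBinOps.legalFor(
        {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8});

With a rule of this shape these operations are already legal on RVV types,
so the legalizer leaves them untouched, which is exactly what the CHECK
lines in the tests below assert.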

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
index 67d187e61106b17..14869dbb99e0fa1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-add.mir
@@ -274,13 +274,12 @@ body:   |
 ---
 name:  test_nxv64s8
 body:   |
-  ; CHECK-LABEL: name: test_nxv64s8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
     %0:_(<vscale x 64 x s8>) = COPY $v8
     %1:_(<vscale x 64 x s8>) = COPY $v9
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
@@ -288,9 +287,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s16
-body:		 |
+name:  test_nxv1s16
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
@@ -359,13 +363,12 @@ body:   |
 ---
 name:  test_nxv32s16
 body:   |
-  ; CHECK-LABEL: name: test_nxv32s16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
     %0:_(<vscale x 32 x s16>) = COPY $v8
     %1:_(<vscale x 32 x s16>) = COPY $v9
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
@@ -373,9 +376,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s32
-body:		 |
+name:  test_nxv1s32
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
@@ -429,13 +437,12 @@ body:   |
 ---
 name:  test_nxv16s32
 body:   |
-  ; CHECK-LABEL: name: test_nxv16s32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
     %0:_(<vscale x 16 x s32>) = COPY $v8
     %1:_(<vscale x 16 x s32>) = COPY $v9
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
@@ -443,9 +450,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s64
-body:		 |
+name:  test_nxv1s64
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
@@ -494,3 +506,5 @@ body:   |
     %1:_(<vscale x 8 x s64>) = COPY $v9
     %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
     PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-and.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-and.mir
index d5c13f403a0dee6..1b30c2752084f26 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-and.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-and.mir
@@ -169,3 +169,331 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-or.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-or.mir
index 881f826e0ed0458..a9c9e282421aaa8 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-or.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-or.mir
@@ -169,3 +169,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
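
For readers skimming these generated tests: every case above exercises the
same legalization rule for scalable-vector LLTs. A minimal sketch of what
such a rule looks like in RISCVLegalizerInfo's constructor follows; the LLT
definitions and the exact type list here are illustrative assumptions, not
lines copied from this patch.

  // Sketch only: declare the generic binary ops legal for a set of
  // scalable-vector LLTs, alongside the existing scalar rules. The real
  // patch may guard this on the V extension and use a different type list.
  const LLT nxv1s8 = LLT::scalable_vector(1, 8);
  const LLT nxv64s8 = LLT::scalable_vector(64, 8);
  const LLT nxv4s32 = LLT::scalable_vector(4, 32);
  const LLT nxv8s64 = LLT::scalable_vector(8, 64);
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
      .legalFor({s32, sXLen, nxv1s8, nxv64s8, nxv4s32, nxv8s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);
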
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-sub.mir
index 258d02646186cdf..2eb839b9527a2e3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-sub.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-sub.mir
@@ -181,3 +181,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-xor.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-xor.mir
index c0ba3e95da9cdeb..6ecfcbb9b86d4c2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-xor.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-xor.mir
@@ -169,3 +169,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
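
The CHECK lines in these MIR tests are machine-maintained: assuming the
usual in-tree workflow, they are produced by re-running the updater script
on the test file rather than edited by hand, e.g.

  llvm/utils/update_mir_test_checks.py \
      llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir

That is why the legalize-add.mir hunks below mostly reposition existing
CHECK lines and add missing ones rather than changing what is checked.
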
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
index 2c06276ec2ab732..9df48ad2028c93e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-add.mir
@@ -312,13 +312,12 @@ body:   |
 ---
 name:  test_nxv64s8
 body:   |
-  ; CHECK-LABEL: name: test_nxv64s8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
     %0:_(<vscale x 64 x s8>) = COPY $v8
     %1:_(<vscale x 64 x s8>) = COPY $v9
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
@@ -326,9 +325,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s16
-body:		 |
+name:  test_nxv1s16
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s16>)
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
@@ -397,13 +401,12 @@ body:   |
 ---
 name:  test_nxv32s16
 body:   |
-  ; CHECK-LABEL: name: test_nxv32s16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
     %0:_(<vscale x 32 x s16>) = COPY $v8
     %1:_(<vscale x 32 x s16>) = COPY $v9
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
@@ -411,9 +414,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s32
-body:		 |
+name:  test_nxv1s32
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s32>)
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
@@ -467,13 +475,12 @@ body:   |
 ---
 name:  test_nxv16s32
 body:   |
-  ; CHECK-LABEL: name: test_nxv16s32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
     %0:_(<vscale x 16 x s32>) = COPY $v8
     %1:_(<vscale x 16 x s32>) = COPY $v9
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
@@ -481,9 +488,14 @@ body:   |
 
 ...
 ---
-name:		test_nxv1s64
-body:		 |
+name:  test_nxv1s64
+body:   |
   bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 1 x s64>)
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
@@ -533,3 +545,4 @@ body:   |
     %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
     PseudoRET implicit %2
 
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-and.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-and.mir
index 89541575cf1c8f2..74152e83c5d1115 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-and.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-and.mir
@@ -201,3 +201,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[AND]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-or.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-or.mir
index 3c56929ef67bd23..dc7645743905edf 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-or.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-or.mir
@@ -201,3 +201,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[OR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-sub.mir
index c2504273c2af67e..8ae992ff751cc9c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-sub.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-sub.mir
@@ -219,3 +219,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[SUB]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
+    PseudoRET implicit %2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-xor.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-xor.mir
index 469f8b25f7ec1ea..c1747b2f04dd5f2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-xor.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv64/legalize-xor.mir
@@ -201,3 +201,330 @@ body:             |
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 
 ...
+---
+name:  test_nxv1s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s8>)
+    %0:_(<vscale x 1 x s8>) = COPY $v8
+    %1:_(<vscale x 1 x s8>) = COPY $v9
+    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv2s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s8>)
+    %0:_(<vscale x 2 x s8>) = COPY $v8
+    %1:_(<vscale x 2 x s8>) = COPY $v9
+    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s8>)
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %1:_(<vscale x 4 x s8>) = COPY $v9
+    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s8>)
+    %0:_(<vscale x 8 x s8>) = COPY $v8
+    %1:_(<vscale x 8 x s8>) = COPY $v9
+    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s8>)
+    %0:_(<vscale x 16 x s8>) = COPY $v8
+    %1:_(<vscale x 16 x s8>) = COPY $v9
+    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s8>)
+    %0:_(<vscale x 32 x s8>) = COPY $v8
+    %1:_(<vscale x 32 x s8>) = COPY $v9
+    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv64s8
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv64s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 64 x s8>)
+    %0:_(<vscale x 64 x s8>) = COPY $v8
+    %1:_(<vscale x 64 x s8>) = COPY $v9
+    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s16>)
+    %0:_(<vscale x 1 x s16>) = COPY $v8
+    %1:_(<vscale x 1 x s16>) = COPY $v9
+    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s16>)
+    %0:_(<vscale x 2 x s16>) = COPY $v8
+    %1:_(<vscale x 2 x s16>) = COPY $v9
+    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s16>)
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %1:_(<vscale x 4 x s16>) = COPY $v9
+    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s16>)
+    %0:_(<vscale x 8 x s16>) = COPY $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v9
+    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s16>)
+    %0:_(<vscale x 16 x s16>) = COPY $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v9
+    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv32s16
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv32s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 32 x s16>)
+    %0:_(<vscale x 32 x s16>) = COPY $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v9
+    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s32>)
+    %0:_(<vscale x 1 x s32>) = COPY $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v9
+    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s32>)
+    %0:_(<vscale x 2 x s32>) = COPY $v8
+    %1:_(<vscale x 2 x s32>) = COPY $v9
+    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s32>)
+    %0:_(<vscale x 4 x s32>) = COPY $v8
+    %1:_(<vscale x 4 x s32>) = COPY $v9
+    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s32>)
+    %0:_(<vscale x 8 x s32>) = COPY $v8
+    %1:_(<vscale x 8 x s32>) = COPY $v9
+    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv16s32
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv16s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 16 x s32>)
+    %0:_(<vscale x 16 x s32>) = COPY $v8
+    %1:_(<vscale x 16 x s32>) = COPY $v9
+    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv1s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv1s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 1 x s64>)
+    %0:_(<vscale x 1 x s64>) = COPY $v8
+    %1:_(<vscale x 1 x s64>) = COPY $v9
+    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+...
+---
+name:  test_nxv2s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 2 x s64>)
+    %0:_(<vscale x 2 x s64>) = COPY $v8
+    %1:_(<vscale x 2 x s64>) = COPY $v9
+    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv4s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv4s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 4 x s64>)
+    %0:_(<vscale x 4 x s64>) = COPY $v8
+    %1:_(<vscale x 4 x s64>) = COPY $v9
+    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...
+---
+name:  test_nxv8s64
+body:   |
+  bb.0.entry:
+    ; CHECK-LABEL: name: test_nxv8s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit [[XOR]](<vscale x 8 x s64>)
+    %0:_(<vscale x 8 x s64>) = COPY $v8
+    %1:_(<vscale x 8 x s64>) = COPY $v9
+    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
+    PseudoRET implicit %2
+
+...

>From ce174942366c9a1c06c43221fa7ba6e8fa6ed63d Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 13 Nov 2023 10:44:53 -0500
Subject: [PATCH 6/7] add hasVInstructions check for vector types

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp   | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 1ddee5a2d6f6c0c..ad435e7a37087ca 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -61,11 +61,18 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
 
   using namespace TargetOpcode;
 
-  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
-      .legalFor({s32, sXLen, nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8,
-                 nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16,
-                 nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
-                 nxv1s64, nxv2s64, nxv4s64, nxv8s64})
+  const std::initializer_list<LLT> ALOpLegalScalarTypes = {s32, sXLen};
+  const std::initializer_list<LLT> ALOpLegalVExtendedTypes = {
+      s32,     sXLen,    nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,
+      nxv16s8, nxv32s8,  nxv64s8,  nxv1s16, nxv2s16, nxv4s16,
+      nxv8s16, nxv16s16, nxv32s16, nxv1s32, nxv2s32, nxv4s32,
+      nxv8s32, nxv16s32, nxv1s64,  nxv2s64, nxv4s64, nxv8s64};
+
+  auto ALOpLegalTypes =
+      ST.hasVInstructions() ? ALOpLegalVExtendedTypes : ALOpLegalScalarTypes;
+
+  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
+      .legalFor(ALOpLegalTypes)
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, sXLen);
 

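A note on the shape of this change: the two type lists are deliberately named
locals. A named std::initializer_list keeps its backing array alive for the
whole enclosing scope, so the view selected by the ternary is still valid by
the time legalFor consumes it; the ternary itself only copies a pointer/length
pair. Below is a minimal standalone sketch of the same selection pattern, with
plain ints standing in for the LLT types (a hypothetical demo, not part of the
patch):

#include <cstdio>
#include <initializer_list>

// Print every element of the selected list.
static void printTypes(std::initializer_list<int> Types) {
  for (int T : Types)
    std::printf("%d ", T);
  std::printf("\n");
}

int main() {
  bool HasV = true; // stands in for ST.hasVInstructions()
  // Named lists: their backing arrays live until the end of this scope.
  const std::initializer_list<int> ScalarOnly = {32, 64};
  const std::initializer_list<int> WithVectors = {32, 64, 128, 256, 512};
  // The ternary copies only a {pointer, length} view; the arrays stay alive.
  auto Selected = HasV ? WithVectors : ScalarOnly;
  printTypes(Selected); // prints: 32 64 128 256 512
}
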
>From 22656c21ac7552a599051478fe20649001ece1c9 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 13 Nov 2023 12:46:43 -0500
Subject: [PATCH 7/7] use initializer lists directly in the ternary since it's
 unnecessary to create named scalar/vector type variables

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index ad435e7a37087ca..152fe2dcfe760c1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -61,18 +61,18 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
 
   using namespace TargetOpcode;
 
-  const std::initializer_list<LLT> ALOpLegalScalarTypes = {s32, sXLen};
-  const std::initializer_list<LLT> ALOpLegalVExtendedTypes = {
-      s32,     sXLen,    nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,
-      nxv16s8, nxv32s8,  nxv64s8,  nxv1s16, nxv2s16, nxv4s16,
-      nxv8s16, nxv16s16, nxv32s16, nxv1s32, nxv2s32, nxv4s32,
-      nxv8s32, nxv16s32, nxv1s64,  nxv2s64, nxv4s64, nxv8s64};
-
-  auto ALOpLegalTypes =
-      ST.hasVInstructions() ? ALOpLegalVExtendedTypes : ALOpLegalScalarTypes;
-
-  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
-      .legalFor(ALOpLegalTypes)
+  auto ALOpLegalTypes =
+      ST.hasVInstructions()
+          ? std::initializer_list<LLT>{s32,     sXLen,    nxv1s8,   nxv2s8,
+                                       nxv4s8,  nxv8s8,   nxv16s8,  nxv32s8,
+                                       nxv64s8, nxv1s16,  nxv2s16,  nxv4s16,
+                                       nxv8s16, nxv16s16, nxv32s16, nxv1s32,
+                                       nxv2s32, nxv4s32,  nxv8s32,  nxv16s32,
+                                       nxv1s64, nxv2s64,  nxv4s64,  nxv8s64}
+          : std::initializer_list<LLT>{s32, sXLen};
+
+  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
+      .legalFor(ALOpLegalTypes)
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, sXLen);
 

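One thing worth double-checking in this formulation is the lifetime of the
temporary arrays backing braced initializer lists that are selected through a
conditional expression; the rules there are subtle. A pattern that sidesteps
the question entirely is to grow the rule set incrementally, since each
legalFor call appends another legality alternative and the rules are checked
in order. A rough sketch of that shape, assuming the same surrounding
RISCVLegalizerInfo constructor context as the patch (an illustration, not the
change as committed):

  auto &ALOps = getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
                    .legalFor({s32, sXLen});
  // Vector types are legal only when the V extension is present.
  if (ST.hasVInstructions())
    ALOps.legalFor({nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8, nxv64s8,
                    nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16, nxv32s16,
                    nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                    nxv1s64, nxv2s64, nxv4s64, nxv8s64});
  ALOps.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);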

