[llvm] 2ff7a4c - [RISCV][NFC] Simplify some rvv regbankselect cases (#155961)

via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 31 19:59:59 PDT 2025


Author: Jianjian Guan
Date: 2025-09-01T10:59:54+08:00
New Revision: 2ff7a4ccc9e72e99a32683248709300498480082

URL: https://github.com/llvm/llvm-project/commit/2ff7a4ccc9e72e99a32683248709300498480082
DIFF: https://github.com/llvm/llvm-project/commit/2ff7a4ccc9e72e99a32683248709300498480082.diff

LOG: [RISCV][NFC] Simplify some rvv regbankselect cases (#155961)
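
The RV32I and RV64I FileCheck prefixes produced identical check lines in
these regbankselect tests, so the RUN lines now share the default CHECK
prefix and each test body carries a single set of assertions. As the NOTE
at the top of each file says, the assertions are autogenerated; a minimal
sketch of regenerating them after such a RUN-line change, assuming a local
build of llc at build/bin/llc (the build path is illustrative):

    # Rewrites the "; CHECK..." lines in place from fresh llc output.
    python llvm/utils/update_mir_test_checks.py \
        --llc-binary build/bin/llc \
        llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir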

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
index 759c28543f1e5..ab3d777856d6e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/add.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 ---
 name:            vadd_vv_nxv1i8
 legalized:       true
@@ -13,23 +13,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv1i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv1i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv1i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
@@ -45,23 +36,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv2i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv2i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv2i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
@@ -77,23 +59,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv4i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv4i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv4i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
@@ -109,23 +82,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv8i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv8i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv8i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
@@ -141,23 +105,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vadd_vv_nxv16i8
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv16i8
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vadd_vv_nxv16i8
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 16 x s8>) = COPY $v8m2
     %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
@@ -173,23 +128,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vadd_vv_nxv32i8
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv32i8
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vadd_vv_nxv32i8
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 32 x s8>) = COPY $v8m4
     %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
@@ -205,23 +151,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vadd_vv_nxv64i8
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv64i8
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vadd_vv_nxv64i8
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 64 x s8>) = COPY $v8m8
     %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
@@ -237,23 +174,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv1i16
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv1i16
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv1i16
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
@@ -269,23 +197,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv2i16
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv2i16
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv2i16
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
@@ -301,23 +220,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv4i16
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv4i16
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv4i16
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
@@ -333,23 +243,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vadd_vv_nxv8i16
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv8i16
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vadd_vv_nxv8i16
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8m2
     %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
@@ -365,23 +266,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vadd_vv_nxv16i16
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv16i16
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vadd_vv_nxv16i16
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8m4
     %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
@@ -397,23 +289,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vadd_vv_nxv32i16
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv32i16
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vadd_vv_nxv32i16
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8m8
     %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
@@ -429,23 +312,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv1i32
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv1i32
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv1i32
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
@@ -461,23 +335,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv2i32
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv2i32
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv2i32
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
@@ -493,23 +358,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vadd_vv_nxv4i32
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv4i32
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vadd_vv_nxv4i32
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = COPY $v8m2
     %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
@@ -525,23 +381,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vadd_vv_nxv8i32
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv8i32
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vadd_vv_nxv8i32
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = COPY $v8m4
     %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
@@ -557,23 +404,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vadd_vv_nxv16i32
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv16i32
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vadd_vv_nxv16i32
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = COPY $v8m8
     %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
@@ -589,23 +427,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vadd_vv_nxv1i64
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv1i64
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vadd_vv_nxv1i64
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
@@ -621,23 +450,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vadd_vv_nxv2i64
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv2i64
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vadd_vv_nxv2i64
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s64>) = COPY $v8m2
     %1:_(<vscale x 2 x s64>) = COPY $v10m2
     %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
@@ -653,23 +473,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vadd_vv_nxv4i64
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv4i64
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vadd_vv_nxv4i64
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s64>) = COPY $v8m4
     %1:_(<vscale x 4 x s64>) = COPY $v12m4
     %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
@@ -685,23 +496,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vadd_vv_nxv8i64
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vadd_vv_nxv8i64
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vadd_vv_nxv8i64
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s64>) = COPY $v8m8
     %1:_(<vscale x 8 x s64>) = COPY $v16m8
     %2:_(<vscale x 8 x s64>) = G_ADD %0, %1

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir
index 4ec6422716476..4d1b11b6921dd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 
 ---
 name:            anyext_nxv1i16_nxv1i8
@@ -14,21 +14,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv1i16_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv1i16_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv1i16_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = G_ANYEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s16>)
@@ -43,21 +35,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv1i32_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = G_ANYEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s32>)
@@ -72,21 +56,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv1i64_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -101,21 +77,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv2i16_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv2i16_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv2i16_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = G_ANYEXT %0(<vscale x 2 x s8>)
     $v8 = COPY %1(<vscale x 2 x s16>)
@@ -130,21 +98,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv2i32_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = G_ANYEXT %0(<vscale x 2 x s8>)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -159,21 +119,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: anyext_nxv2i64_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s8>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -188,21 +140,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv4i16_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv4i16_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv4i16_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = G_ANYEXT %0(<vscale x 4 x s8>)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -217,21 +161,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: anyext_nxv4i32_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s32>) = G_ANYEXT %0(<vscale x 4 x s8>)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
@@ -246,21 +182,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: anyext_nxv4i64_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s8>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -275,21 +203,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv8i16_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: anyext_nxv8i16_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: anyext_nxv8i16_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s16>) = G_ANYEXT %0(<vscale x 8 x s8>)
     $v8m2 = COPY %1(<vscale x 8 x s16>)
@@ -304,21 +224,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: anyext_nxv8i32_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s32>) = G_ANYEXT %0(<vscale x 8 x s8>)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
@@ -333,21 +245,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: anyext_nxv8i64_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s8>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
@@ -362,21 +266,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv16i16_nxv16i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: anyext_nxv16i16_nxv16i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: anyext_nxv16i16_nxv16i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s8>) = COPY $v8m2
     %1:_(<vscale x 16 x s16>) = G_ANYEXT %0(<vscale x 16 x s8>)
     $v8m4 = COPY %1(<vscale x 16 x s16>)
@@ -391,21 +287,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m4
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m4
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: anyext_nxv16i32_nxv16i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s8>) = COPY $v8m4
     %1:_(<vscale x 16 x s32>) = G_ANYEXT %0(<vscale x 16 x s8>)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
@@ -420,21 +308,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv32i16_nxv32i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ANYEXT [[COPY]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv32i16_nxv32i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ANYEXT [[COPY]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: anyext_nxv32i16_nxv32i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ANYEXT [[COPY]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s8>) = COPY $v8m4
     %1:_(<vscale x 32 x s16>) = G_ANYEXT %0(<vscale x 32 x s8>)
     $v8m8 = COPY %1(<vscale x 32 x s16>)
@@ -449,21 +329,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv1i32_nxv1i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = G_ANYEXT %0(<vscale x 1 x s16>)
     $v8 = COPY %1(<vscale x 1 x s32>)
@@ -478,21 +350,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv1i64_nxv1i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s16>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -507,21 +371,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv2i32_nxv2i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = G_ANYEXT %0(<vscale x 2 x s16>)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -536,21 +392,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: anyext_nxv2i64_nxv2i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s16>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -565,21 +413,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: anyext_nxv4i32_nxv4i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s32>) = G_ANYEXT %0(<vscale x 4 x s16>)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
@@ -594,21 +434,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: anyext_nxv4i64_nxv4i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s16>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -623,21 +455,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: anyext_nxv8i32_nxv8i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s16>) = COPY $v8m2
     %1:_(<vscale x 8 x s32>) = G_ANYEXT %0(<vscale x 8 x s16>)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
@@ -652,21 +476,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: anyext_nxv8i64_nxv8i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s16>) = COPY $v8m2
     %1:_(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s16>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
@@ -681,21 +497,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: anyext_nxv16i32_nxv16i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s16>) = COPY $v8m4
     %1:_(<vscale x 16 x s32>) = G_ANYEXT %0(<vscale x 16 x s16>)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
@@ -710,21 +518,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: anyext_nxv1i64_nxv1i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s32>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -739,21 +539,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: anyext_nxv2i64_nxv2i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s32>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -768,21 +560,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: anyext_nxv4i64_nxv4i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s32>) = COPY $v8m2
     %1:_(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s32>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -797,21 +581,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: anyext_nxv8i64_nxv8i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s32>) = COPY $v8m4
     %1:_(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s32>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir
index 925d6aee47490..8447b3602165a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 
 ---
 name:            icmp_nxv1i1
@@ -12,17 +12,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv1i1
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s1>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv1i1
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s1>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv1i1
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s1>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
     %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s1>), %0
     $v8 = COPY %1(<vscale x 1 x s1>)
@@ -35,17 +29,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv2i1
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s1>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv2i1
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s1>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv2i1
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s1>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
     %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s1>), %0
     $v8 = COPY %1(<vscale x 2 x s1>)
@@ -58,17 +46,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv4i1
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s1>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv4i1
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s1>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv4i1
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s1>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
     %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s1>), %0
     $v8 = COPY %1(<vscale x 4 x s1>)
@@ -81,17 +63,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv8i1
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s1>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv8i1
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s1>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv8i1
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s1>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
     %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s1>), %0
     $v8 = COPY %1(<vscale x 8 x s1>)
@@ -104,17 +80,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv16i1
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s1>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv16i1
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s1>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv16i1
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s1>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
     %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s1>), %0
     $v8 = COPY %1(<vscale x 16 x s1>)
@@ -127,17 +97,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv32i1
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s1>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv32i1
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s1>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv32i1
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s1>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
     %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 32 x s1>), %0
     $v8 = COPY %1(<vscale x 32 x s1>)
@@ -150,17 +114,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv64i1
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s1>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv64i1
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s1>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv64i1
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s1>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
     %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 64 x s1>), %0
     $v8 = COPY %1(<vscale x 64 x s1>)
@@ -173,17 +131,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv1i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv1i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv1i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
     %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s8>), %0
     $v8 = COPY %1(<vscale x 1 x s1>)
@@ -196,17 +148,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv2i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv2i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv2i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
     %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s8>), %0
     $v8 = COPY %1(<vscale x 2 x s1>)
@@ -219,17 +165,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv4i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv4i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv4i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
     %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s8>), %0
     $v8 = COPY %1(<vscale x 4 x s1>)
@@ -242,17 +182,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv8i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv8i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv8i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
     %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s8>), %0
     $v8 = COPY %1(<vscale x 8 x s1>)
@@ -265,17 +199,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv16i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv16i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv16i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
     %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s8>), %0
     $v8 = COPY %1(<vscale x 16 x s1>)
@@ -288,17 +216,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv32i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv32i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv32i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
     %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 32 x s8>), %0
     $v8 = COPY %1(<vscale x 32 x s1>)
@@ -311,17 +233,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv64i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv64i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv64i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
     %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 64 x s8>), %0
     $v8 = COPY %1(<vscale x 64 x s1>)
@@ -334,17 +250,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv1i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv1i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv1i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
     %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s16>), %0
     $v8 = COPY %1(<vscale x 1 x s1>)
@@ -357,17 +267,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv2i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv2i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv2i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
     %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s16>), %0
     $v8 = COPY %1(<vscale x 2 x s1>)
@@ -380,17 +284,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv4i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv4i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv4i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
     %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s16>), %0
     $v8 = COPY %1(<vscale x 4 x s1>)
@@ -403,17 +301,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv8i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv8i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv8i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
     %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s16>), %0
     $v8 = COPY %1(<vscale x 8 x s1>)
@@ -426,17 +318,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv16i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv16i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv16i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
     %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s16>), %0
     $v8 = COPY %1(<vscale x 16 x s1>)
@@ -449,17 +335,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv32i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv32i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv32i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
     %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 32 x s16>), %0
     $v8 = COPY %1(<vscale x 32 x s1>)
@@ -472,17 +352,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv1i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv1i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv1i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
     %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s32>), %0
     $v8 = COPY %1(<vscale x 1 x s1>)
@@ -495,17 +369,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv2i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv2i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv2i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
     %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s32>), %0
     $v8 = COPY %1(<vscale x 2 x s1>)
@@ -518,17 +386,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv4i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv4i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv4i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
     %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s32>), %0
     $v8 = COPY %1(<vscale x 4 x s1>)
@@ -541,17 +403,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv8i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv8i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv8i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
     %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s32>), %0
     $v8 = COPY %1(<vscale x 8 x s1>)
@@ -564,17 +420,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv16i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv16i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv16i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
     %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s32>), %0
     $v8 = COPY %1(<vscale x 16 x s1>)
@@ -587,17 +437,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv1i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv1i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv1i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
     %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s64>), %0
     $v8 = COPY %1(<vscale x 1 x s1>)
@@ -610,17 +454,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv2i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv2i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv2i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
     %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s64>), %0
     $v8 = COPY %1(<vscale x 2 x s1>)
@@ -633,17 +471,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv4i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv4i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv4i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
     %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s64>), %0
     $v8 = COPY %1(<vscale x 4 x s1>)
@@ -656,17 +488,11 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: icmp_nxv8i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
-    ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: icmp_nxv8i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
-    ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: icmp_nxv8i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
     %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s64>), %0
     $v8 = COPY %1(<vscale x 8 x s1>)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
index d3fb51cf91e3a..017da0d536af7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
@@ -1,25 +1,20 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 ---
 name:            implicitdef_nxv1i8
 legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv1i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv1i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv1i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 1 x s8>)
     PseudoRET implicit $v8
@@ -30,15 +25,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv2i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv2i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv2i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 2 x s8>)
     PseudoRET implicit $v8
@@ -49,15 +39,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv4i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv4i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv4i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 4 x s8>)
     PseudoRET implicit $v8
@@ -68,15 +53,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv8i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv8i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv8i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 8 x s8>)
     PseudoRET implicit $v8
@@ -87,15 +67,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv16i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv16i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: implicitdef_nxv16i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
     $v8m2 = COPY %0(<vscale x 16 x s8>)
     PseudoRET implicit $v8m2
@@ -106,15 +81,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv32i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv32i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: implicitdef_nxv32i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
     $v8m4 = COPY %0(<vscale x 32 x s8>)
     PseudoRET implicit $v8m4
@@ -125,15 +95,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv64i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv64i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: implicitdef_nxv64i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
     $v8m8 = COPY %0(<vscale x 64 x s8>)
     PseudoRET implicit $v8m8
@@ -144,15 +109,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv1i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv1i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv1i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 1 x s16>)
     PseudoRET implicit $v8
@@ -163,15 +123,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv2i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv2i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv2i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 2 x s16>)
     PseudoRET implicit $v8
@@ -182,15 +137,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv4i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv4i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv4i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 4 x s16>)
     PseudoRET implicit $v8
@@ -201,15 +151,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv8i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv8i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: implicitdef_nxv8i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
     $v8m2 = COPY %0(<vscale x 8 x s16>)
     PseudoRET implicit $v8m2
@@ -220,15 +165,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv16i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv16i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: implicitdef_nxv16i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
     $v8m4 = COPY %0(<vscale x 16 x s16>)
     PseudoRET implicit $v8m4
@@ -239,15 +179,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv32i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv32i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: implicitdef_nxv32i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
     $v8m8 = COPY %0(<vscale x 32 x s16>)
     PseudoRET implicit $v8m8
@@ -258,15 +193,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv1i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv1i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv1i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 1 x s32>)
     PseudoRET implicit $v8
@@ -277,15 +207,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv2i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv2i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv2i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 2 x s32>)
     PseudoRET implicit $v8
@@ -296,15 +221,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv4i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv4i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: implicitdef_nxv4i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
     $v8m2 = COPY %0(<vscale x 4 x s32>)
     PseudoRET implicit $v8m2
@@ -315,15 +235,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv8i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv8i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: implicitdef_nxv8i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
     $v8m4 = COPY %0(<vscale x 8 x s32>)
     PseudoRET implicit $v8m4
@@ -334,15 +249,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv16i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv16i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: implicitdef_nxv16i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
     $v8m8 = COPY %0(<vscale x 16 x s32>)
     PseudoRET implicit $v8m8
@@ -353,15 +263,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv1i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv1i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: implicitdef_nxv1i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
     $v8 = COPY %0(<vscale x 1 x s64>)
     PseudoRET implicit $v8
@@ -372,15 +277,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv2i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv2i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: implicitdef_nxv2i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
     $v8m2 = COPY %0(<vscale x 2 x s64>)
     PseudoRET implicit $v8m2
@@ -391,15 +291,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv4i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv4i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: implicitdef_nxv4i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
     $v8m4 = COPY %0(<vscale x 4 x s64>)
     PseudoRET implicit $v8m4
@@ -410,15 +305,10 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: implicitdef_nxv8i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: implicitdef_nxv8i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: implicitdef_nxv8i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
     $v8m8 = COPY %0(<vscale x 8 x s64>)
     PseudoRET implicit $v8m8

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
index 32310f5e9eb58..b58e87c128218 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 --- |
 
   define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
@@ -231,21 +231,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx1i8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx1i8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx1i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s8>)
@@ -260,21 +252,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s8>)
@@ -289,21 +273,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx4i8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx4i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 4 x s8>)
@@ -318,21 +294,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx8i8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx8i8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx8i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 8 x s8>)
@@ -347,21 +315,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx16i8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx16i8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx16i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
@@ -376,21 +336,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx32i8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vload_nx32i8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vload_nx32i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 32 x s8>)
@@ -405,21 +357,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx64i8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vload_nx64i8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vload_nx64i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 64 x s8>)
@@ -434,21 +378,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx1i16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx1i16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx1i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s16>)
@@ -463,21 +399,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s16>)
@@ -492,21 +420,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx4i16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx4i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -521,21 +441,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx8i16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx8i16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx8i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 8 x s16>)
@@ -550,21 +462,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx16i16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
-    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vload_nx16i16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
-    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vload_nx16i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 16 x s16>)
@@ -579,21 +483,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx32i16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
-    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vload_nx32i16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
-    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vload_nx32i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 32 x s16>)
@@ -608,21 +504,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx1i32
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx1i32
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx1i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s32>)
@@ -637,21 +525,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i32
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i32
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -666,21 +546,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i32
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx4i32
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx4i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
@@ -695,21 +567,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx8i32
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
-    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vload_nx8i32
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
-    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vload_nx8i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
@@ -724,21 +588,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx16i32
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
-    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vload_nx16i32
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
-    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vload_nx16i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
@@ -753,21 +609,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx1i64
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx1i64
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx1i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -782,21 +630,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i64
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx2i64
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx2i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -811,21 +651,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i64
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
-    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vload_nx4i64
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
-    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vload_nx4i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -840,21 +672,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx8i64
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
-    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vload_nx8i64
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
-    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vload_nx8i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
@@ -869,21 +693,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx16i8_align1
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx16i8_align1
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx16i8_align1
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
@@ -898,21 +714,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx16i8_align2
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx16i8_align2
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx16i8_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
@@ -927,21 +735,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx16i8_align16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx16i8_align16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx16i8_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
@@ -956,21 +756,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx16i8_align64
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx16i8_align64
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx16i8_align64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
@@ -985,23 +777,14 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i16_align1
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
-    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx4i16_align1
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
-    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx4i16_align1
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
     %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
@@ -1017,21 +800,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i16_align2
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx4i16_align2
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx4i16_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -1046,21 +821,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i16_align4
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx4i16_align4
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx4i16_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -1075,21 +842,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i16_align8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx4i16_align8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx4i16_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -1104,21 +863,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx4i16_align16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx4i16_align16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx4i16_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -1133,23 +884,14 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i32_align2
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
-    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i32_align2
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
-    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i32_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
     %1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
@@ -1165,21 +907,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i32_align4
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i32_align4
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i32_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -1194,21 +928,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i32_align8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i32_align8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i32_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -1223,21 +949,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i32_align16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i32_align16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i32_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -1252,21 +970,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i32_align256
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2i32_align256
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2i32_align256
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -1281,23 +991,14 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i64_align4
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
-    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx2i64_align4
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
-    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx2i64_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
     %1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
@@ -1313,21 +1014,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i64_align8
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx2i64_align8
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx2i64_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -1342,21 +1035,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i64_align16
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx2i64_align16
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx2i64_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -1371,21 +1056,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2i64_align32
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
-    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vload_nx2i64_align32
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
-    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vload_nx2i64_align32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -1400,21 +1077,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx1ptr
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx1ptr
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx1ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x p0>)
@@ -1429,21 +1098,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx2ptr
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
-    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vload_nx2ptr
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
-    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vload_nx2ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x p0>)
@@ -1458,21 +1119,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; RV32I-LABEL: name: vload_nx8ptr
-    ; RV32I: liveins: $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
-    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vload_nx8ptr
-    ; RV64I: liveins: $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
-    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vload_nx8ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 8 x p0>)

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
index 6985d12eacd56..7e2badbc5cda9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 
 ---
 name:            select_nxv1i8
@@ -12,19 +12,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv1i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv1i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv1i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
     %0:_(<vscale x 1 x s8>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s8>), %2(<vscale x 1 x s8>)
@@ -37,19 +30,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv2i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv2i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv2i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
     %0:_(<vscale x 2 x s8>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s8>), %2(<vscale x 2 x s8>)
@@ -62,19 +48,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv4i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv4i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv4i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
     %0:_(<vscale x 4 x s8>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s8>), %2(<vscale x 4 x s8>)
@@ -87,19 +66,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv8i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv8i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv8i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
     %0:_(<vscale x 8 x s8>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s8>), %2(<vscale x 8 x s8>)
@@ -112,19 +84,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv16i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: select_nxv16i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: select_nxv16i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
     %0:_(<vscale x 16 x s8>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s8>), %2(<vscale x 16 x s8>)
@@ -137,19 +102,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv32i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: select_nxv32i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: select_nxv32i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
     %0:_(<vscale x 32 x s8>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s8>), %2(<vscale x 32 x s8>)
@@ -162,19 +120,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv64i8
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: select_nxv64i8
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: select_nxv64i8
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
     %0:_(<vscale x 64 x s8>) = G_SELECT %1(<vscale x 64 x s1>), %2(<vscale x 64 x s8>), %2(<vscale x 64 x s8>)
@@ -187,19 +138,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv1i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv1i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv1i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
     %0:_(<vscale x 1 x s16>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s16>), %2(<vscale x 1 x s16>)
@@ -212,19 +156,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv2i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv2i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv2i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
     %0:_(<vscale x 2 x s16>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s16>), %2(<vscale x 2 x s16>)
@@ -237,19 +174,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv4i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv4i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv4i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
     %0:_(<vscale x 4 x s16>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s16>), %2(<vscale x 4 x s16>)
@@ -262,19 +192,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv8i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: select_nxv8i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: select_nxv8i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
     %0:_(<vscale x 8 x s16>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s16>), %2(<vscale x 8 x s16>)
@@ -287,19 +210,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv16i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: select_nxv16i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: select_nxv16i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
     %0:_(<vscale x 16 x s16>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s16>), %2(<vscale x 16 x s16>)
@@ -312,19 +228,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv32i16
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: select_nxv32i16
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: select_nxv32i16
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
     %0:_(<vscale x 32 x s16>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s16>), %2(<vscale x 32 x s16>)
@@ -337,19 +246,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv1i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv1i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv1i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
     %0:_(<vscale x 1 x s32>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s32>), %2(<vscale x 1 x s32>)
@@ -362,19 +264,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv2i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv2i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv2i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
     %0:_(<vscale x 2 x s32>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s32>), %2(<vscale x 2 x s32>)
@@ -387,19 +282,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv4i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: select_nxv4i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: select_nxv4i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
     %0:_(<vscale x 4 x s32>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s32>), %2(<vscale x 4 x s32>)
@@ -412,19 +300,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv8i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: select_nxv8i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: select_nxv8i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
     %0:_(<vscale x 8 x s32>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s32>), %2(<vscale x 8 x s32>)
@@ -437,19 +318,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv16i32
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: select_nxv16i32
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: select_nxv16i32
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
     %0:_(<vscale x 16 x s32>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s32>), %2(<vscale x 16 x s32>)
@@ -462,19 +336,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv1i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: select_nxv1i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: select_nxv1i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
     %0:_(<vscale x 1 x s64>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s64>), %2(<vscale x 1 x s64>)
@@ -487,19 +354,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv2i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: select_nxv2i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: select_nxv2i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
     %0:_(<vscale x 2 x s64>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s64>), %2(<vscale x 2 x s64>)
@@ -512,19 +372,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv4i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: select_nxv4i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: select_nxv4i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
     %0:_(<vscale x 4 x s64>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s64>), %2(<vscale x 4 x s64>)
@@ -537,19 +390,12 @@ legalized:       true
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    ; RV32I-LABEL: name: select_nxv8i64
-    ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
-    ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: select_nxv8i64
-    ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
-    ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: select_nxv8i64
+    ; CHECK: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
     %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
     %0:_(<vscale x 8 x s64>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s64>), %2(<vscale x 8 x s64>)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir
index 25df03fa32dbd..efd7f13e376a5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 
 ---
 name:            sext_nxv1i16_nxv1i8
@@ -14,21 +14,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv1i16_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv1i16_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv1i16_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = G_SEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s16>)
@@ -43,21 +35,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv1i32_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv1i32_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv1i32_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = G_SEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s32>)
@@ -72,21 +56,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv1i64_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv1i64_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv1i64_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -101,21 +77,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv2i16_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv2i16_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv2i16_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = G_SEXT %0(<vscale x 2 x s8>)
     $v8 = COPY %1(<vscale x 2 x s16>)
@@ -130,21 +98,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv2i32_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv2i32_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv2i32_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = G_SEXT %0(<vscale x 2 x s8>)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -159,21 +119,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv2i64_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: sext_nxv2i64_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: sext_nxv2i64_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s8>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -188,21 +140,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv4i16_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv4i16_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv4i16_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = G_SEXT %0(<vscale x 4 x s8>)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -217,21 +161,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv4i32_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: sext_nxv4i32_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: sext_nxv4i32_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s32>) = G_SEXT %0(<vscale x 4 x s8>)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
@@ -246,21 +182,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv4i64_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: sext_nxv4i64_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: sext_nxv4i64_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s8>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -275,21 +203,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv8i16_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: sext_nxv8i16_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: sext_nxv8i16_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s16>) = G_SEXT %0(<vscale x 8 x s8>)
     $v8m2 = COPY %1(<vscale x 8 x s16>)
@@ -304,21 +224,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv8i32_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: sext_nxv8i32_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: sext_nxv8i32_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s32>) = G_SEXT %0(<vscale x 8 x s8>)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
@@ -333,21 +245,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv8i64_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: sext_nxv8i64_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: sext_nxv8i64_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s8>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
@@ -362,21 +266,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv16i16_nxv16i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: sext_nxv16i16_nxv16i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: sext_nxv16i16_nxv16i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s8>) = COPY $v8m2
     %1:_(<vscale x 16 x s16>) = G_SEXT %0(<vscale x 16 x s8>)
     $v8m4 = COPY %1(<vscale x 16 x s16>)
@@ -391,21 +287,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv16i32_nxv16i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: sext_nxv16i32_nxv16i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: sext_nxv16i32_nxv16i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s8>) = COPY $v8m2
     %1:_(<vscale x 16 x s32>) = G_SEXT %0(<vscale x 16 x s8>)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
@@ -420,21 +308,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv32i16_nxv32i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SEXT [[COPY]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: sext_nxv32i16_nxv32i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SEXT [[COPY]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: sext_nxv32i16_nxv32i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SEXT [[COPY]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s8>) = COPY $v8m4
     %1:_(<vscale x 32 x s16>) = G_SEXT %0(<vscale x 32 x s8>)
     $v8m8 = COPY %1(<vscale x 32 x s16>)
@@ -449,21 +329,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv1i32_nxv1i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv1i32_nxv1i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv1i32_nxv1i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = G_SEXT %0(<vscale x 1 x s16>)
     $v8 = COPY %1(<vscale x 1 x s32>)
@@ -478,21 +350,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv1i64_nxv1i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv1i64_nxv1i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv1i64_nxv1i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s16>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -507,21 +371,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv2i32_nxv2i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv2i32_nxv2i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv2i32_nxv2i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = G_SEXT %0(<vscale x 2 x s16>)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -536,21 +392,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv2i64_nxv2i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: sext_nxv2i64_nxv2i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: sext_nxv2i64_nxv2i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s16>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -565,21 +413,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv4i32_nxv4i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: sext_nxv4i32_nxv4i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: sext_nxv4i32_nxv4i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s32>) = G_SEXT %0(<vscale x 4 x s16>)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
@@ -594,21 +434,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv4i64_nxv4i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: sext_nxv4i64_nxv4i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: sext_nxv4i64_nxv4i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s16>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -623,21 +455,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv8i32_nxv8i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: sext_nxv8i32_nxv8i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: sext_nxv8i32_nxv8i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s16>) = COPY $v8m2
     %1:_(<vscale x 8 x s32>) = G_SEXT %0(<vscale x 8 x s16>)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
@@ -652,21 +476,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv8i64_nxv8i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: sext_nxv8i64_nxv8i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: sext_nxv8i64_nxv8i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s16>) = COPY $v8m2
     %1:_(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s16>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
@@ -681,21 +497,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv16i32_nxv16i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: sext_nxv16i32_nxv16i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: sext_nxv16i32_nxv16i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s16>) = COPY $v8m4
     %1:_(<vscale x 16 x s32>) = G_SEXT %0(<vscale x 16 x s16>)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
@@ -710,21 +518,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv1i64_nxv1i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: sext_nxv1i64_nxv1i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: sext_nxv1i64_nxv1i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s32>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -739,21 +539,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv2i64_nxv2i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: sext_nxv2i64_nxv2i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: sext_nxv2i64_nxv2i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s32>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -768,21 +560,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv4i64_nxv4i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: sext_nxv4i64_nxv4i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: sext_nxv4i64_nxv4i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s32>) = COPY $v8m2
     %1:_(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s32>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -797,21 +581,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: sext_nxv8i64_nxv8i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: sext_nxv8i64_nxv8i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: sext_nxv8i64_nxv8i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s32>) = COPY $v8m4
     %1:_(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s32>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
index ceef1680fbf76..3a6a2389d1db0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 --- |
 
   define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
@@ -231,21 +231,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx1i8
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx1i8
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx1i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s8>) = COPY $v8
     G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
@@ -260,21 +252,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i8
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i8
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s8>) = COPY $v8
     G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
@@ -289,21 +273,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx4i8
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i8
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s8>) = COPY $v8
     G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
@@ -318,21 +294,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx8i8
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx8i8
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx8i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s8>) = COPY $v8
     G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
@@ -347,21 +315,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx16i8
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx16i8
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx16i8
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = COPY $v8m2
     G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
@@ -376,21 +336,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m4
 
-    ; RV32I-LABEL: name: vstore_nx32i8
-    ; RV32I: liveins: $x10, $v8m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx32i8
-    ; RV64I: liveins: $x10, $v8m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx32i8
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 32 x s8>) = COPY $v8m4
     G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
@@ -405,21 +357,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m8
 
-    ; RV32I-LABEL: name: vstore_nx64i8
-    ; RV32I: liveins: $x10, $v8m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx64i8
-    ; RV64I: liveins: $x10, $v8m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx64i8
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 64 x s8>) = COPY $v8m8
     G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
@@ -434,21 +378,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx1i16
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx1i16
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx1i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s16>) = COPY $v8
     G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
@@ -463,21 +399,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i16
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i16
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s16>) = COPY $v8
     G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
@@ -492,21 +420,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx4i16
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i16
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = COPY $v8
     G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
@@ -521,21 +441,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx8i16
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx8i16
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx8i16
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s16>) = COPY $v8m2
     G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
@@ -550,21 +462,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m4
 
-    ; RV32I-LABEL: name: vstore_nx16i16
-    ; RV32I: liveins: $x10, $v8m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx16i16
-    ; RV64I: liveins: $x10, $v8m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx16i16
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s16>) = COPY $v8m4
     G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
@@ -579,21 +483,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m8
 
-    ; RV32I-LABEL: name: vstore_nx32i16
-    ; RV32I: liveins: $x10, $v8m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx32i16
-    ; RV64I: liveins: $x10, $v8m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx32i16
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 32 x s16>) = COPY $v8m8
     G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
@@ -608,21 +504,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx1i32
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx1i32
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx1i32
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s32>) = COPY $v8
     G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
@@ -637,21 +525,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i32
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i32
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i32
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = COPY $v8
     G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
@@ -666,21 +546,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx4i32
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i32
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i32
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s32>) = COPY $v8m2
     G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
@@ -695,21 +567,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m4
 
-    ; RV32I-LABEL: name: vstore_nx8i32
-    ; RV32I: liveins: $x10, $v8m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx8i32
-    ; RV64I: liveins: $x10, $v8m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx8i32
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s32>) = COPY $v8m4
     G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
@@ -724,21 +588,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m8
 
-    ; RV32I-LABEL: name: vstore_nx16i32
-    ; RV32I: liveins: $x10, $v8m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx16i32
-    ; RV64I: liveins: $x10, $v8m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx16i32
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s32>) = COPY $v8m8
     G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
@@ -753,21 +609,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx1i64
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx1i64
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx1i64
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x s64>) = COPY $v8
     G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
@@ -782,21 +630,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx2i64
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i64
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i64
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = COPY $v8m2
     G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
@@ -811,21 +651,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m4
 
-    ; RV32I-LABEL: name: vstore_nx4i64
-    ; RV32I: liveins: $x10, $v8m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i64
-    ; RV64I: liveins: $x10, $v8m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i64
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s64>) = COPY $v8m4
     G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
@@ -840,21 +672,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m8
 
-    ; RV32I-LABEL: name: vstore_nx8i64
-    ; RV32I: liveins: $x10, $v8m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx8i64
-    ; RV64I: liveins: $x10, $v8m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx8i64
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x s64>) = COPY $v8m8
     G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
@@ -869,21 +693,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx16i8_align1
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx16i8_align1
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx16i8_align1
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = COPY $v8m2
     G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
@@ -898,21 +714,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx16i8_align2
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx16i8_align2
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx16i8_align2
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = COPY $v8m2
     G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
@@ -927,21 +735,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx16i8_align16
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx16i8_align16
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx16i8_align16
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = COPY $v8m2
     G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
@@ -956,21 +756,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx16i8_align64
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx16i8_align64
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx16i8_align64
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x s8>) = COPY $v8m2
     G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
@@ -985,23 +777,14 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx4i16_align1
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i16_align1
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i16_align1
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = COPY $v8
     %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
@@ -1017,21 +800,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx4i16_align2
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i16_align2
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i16_align2
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = COPY $v8
     G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
@@ -1046,21 +821,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx4i16_align4
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i16_align4
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i16_align4
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = COPY $v8
     G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
@@ -1075,21 +842,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx4i16_align8
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i16_align8
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i16_align8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = COPY $v8
     G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
@@ -1104,21 +863,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx4i16_align16
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx4i16_align16
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx4i16_align16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 4 x s16>) = COPY $v8
     G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
@@ -1133,23 +884,14 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i32_align2
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i32_align2
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i32_align2
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = COPY $v8
     %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
@@ -1165,21 +907,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i32_align4
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i32_align4
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i32_align4
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = COPY $v8
     G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
@@ -1194,21 +928,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i32_align8
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i32_align8
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i32_align8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = COPY $v8
     G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
@@ -1223,21 +949,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i32_align16
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i32_align16
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i32_align16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = COPY $v8
     G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
@@ -1252,21 +970,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2i32_align256
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i32_align256
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i32_align256
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s32>) = COPY $v8
     G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
@@ -1281,23 +991,14 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx2i64_align4
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i64_align4
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i64_align4
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = COPY $v8m2
     %2:_(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
@@ -1313,21 +1014,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx2i64_align8
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i64_align8
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i64_align8
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = COPY $v8m2
     G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
@@ -1342,21 +1035,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx2i64_align16
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i64_align16
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i64_align16
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = COPY $v8m2
     G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
@@ -1371,21 +1056,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m2
 
-    ; RV32I-LABEL: name: vstore_nx2i64_align32
-    ; RV32I: liveins: $x10, $v8m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2i64_align32
-    ; RV64I: liveins: $x10, $v8m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2i64_align32
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x s64>) = COPY $v8m2
     G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
@@ -1400,21 +1077,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx1ptr
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx1ptr
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx1ptr
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x p0>) = COPY $v8
     G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
@@ -1429,21 +1098,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $v8, $x10
 
-    ; RV32I-LABEL: name: vstore_nx2ptr
-    ; RV32I: liveins: $v8, $x10
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx2ptr
-    ; RV64I: liveins: $v8, $x10
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx2ptr
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x p0>) = COPY $v8
     G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
@@ -1458,21 +1119,13 @@ body:             |
   bb.1 (%ir-block.0):
     liveins: $x10, $v8m4
 
-    ; RV32I-LABEL: name: vstore_nx8ptr
-    ; RV32I: liveins: $x10, $v8m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
-    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
-    ; RV32I-NEXT: PseudoRET
-    ;
-    ; RV64I-LABEL: name: vstore_nx8ptr
-    ; RV64I: liveins: $x10, $v8m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
-    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
-    ; RV64I-NEXT: PseudoRET
+    ; CHECK-LABEL: name: vstore_nx8ptr
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x p0>) = COPY $v8m4
     G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
index ef06385de82e8..c18423cbe3758 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sub.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 ---
 name:            vsub_vv_nxv1i8
 legalized:       true
@@ -13,23 +13,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv1i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv1i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv1i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s8>) = COPY $v9
     %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
@@ -45,23 +36,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv2i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv2i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv2i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
@@ -77,23 +59,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv4i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv4i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv4i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
@@ -109,23 +82,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv8i8
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv8i8
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv8i8
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
@@ -141,23 +105,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vsub_vv_nxv16i8
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv16i8
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vsub_vv_nxv16i8
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 16 x s8>) = COPY $v8m2
     %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
@@ -173,23 +128,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vsub_vv_nxv32i8
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv32i8
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vsub_vv_nxv32i8
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 32 x s8>) = COPY $v8m4
     %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
@@ -205,23 +151,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vsub_vv_nxv64i8
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv64i8
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vsub_vv_nxv64i8
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 64 x s8>) = COPY $v8m8
     %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
@@ -237,23 +174,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv1i16
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv1i16
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv1i16
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = COPY $v9
     %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
@@ -269,23 +197,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv2i16
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv2i16
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv2i16
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
@@ -301,23 +220,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv4i16
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv4i16
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv4i16
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
@@ -333,23 +243,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vsub_vv_nxv8i16
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv8i16
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vsub_vv_nxv8i16
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = COPY $v8m2
     %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
@@ -365,23 +266,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vsub_vv_nxv16i16
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv16i16
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vsub_vv_nxv16i16
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = COPY $v8m4
     %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
@@ -397,23 +289,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vsub_vv_nxv32i16
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv32i16
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vsub_vv_nxv32i16
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s16>) = COPY $v8m8
     %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
@@ -429,23 +312,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv1i32
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv1i32
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv1i32
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = COPY $v9
     %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
@@ -461,23 +335,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv2i32
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv2i32
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv2i32
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
@@ -493,23 +358,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vsub_vv_nxv4i32
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv4i32
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vsub_vv_nxv4i32
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = COPY $v8m2
     %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
@@ -525,23 +381,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vsub_vv_nxv8i32
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv8i32
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vsub_vv_nxv8i32
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = COPY $v8m4
     %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
@@ -557,23 +404,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vsub_vv_nxv16i32
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv16i32
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vsub_vv_nxv16i32
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = COPY $v8m8
     %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
@@ -589,23 +427,14 @@ body:             |
   bb.0.entry:
     liveins: $v8, $v9
 
-    ; RV32I-LABEL: name: vsub_vv_nxv1i64
-    ; RV32I: liveins: $v8, $v9
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv1i64
-    ; RV64I: liveins: $v8, $v9
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: vsub_vv_nxv1i64
+    ; CHECK: liveins: $v8, $v9
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s64>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = COPY $v9
     %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
@@ -621,23 +450,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m2, $v10m2
 
-    ; RV32I-LABEL: name: vsub_vv_nxv2i64
-    ; RV32I: liveins: $v8m2, $v10m2
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv2i64
-    ; RV64I: liveins: $v8m2, $v10m2
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: vsub_vv_nxv2i64
+    ; CHECK: liveins: $v8m2, $v10m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s64>) = COPY $v8m2
     %1:_(<vscale x 2 x s64>) = COPY $v10m2
     %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
@@ -653,23 +473,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m4, $v12m4
 
-    ; RV32I-LABEL: name: vsub_vv_nxv4i64
-    ; RV32I: liveins: $v8m4, $v12m4
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv4i64
-    ; RV64I: liveins: $v8m4, $v12m4
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: vsub_vv_nxv4i64
+    ; CHECK: liveins: $v8m4, $v12m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s64>) = COPY $v8m4
     %1:_(<vscale x 4 x s64>) = COPY $v12m4
     %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
@@ -685,23 +496,14 @@ body:             |
   bb.0.entry:
     liveins: $v8m8, $v16m8
 
-    ; RV32I-LABEL: name: vsub_vv_nxv8i64
-    ; RV32I: liveins: $v8m8, $v16m8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
-    ; RV32I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: vsub_vv_nxv8i64
-    ; RV64I: liveins: $v8m8, $v16m8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
-    ; RV64I-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: vsub_vv_nxv8i64
+    ; CHECK: liveins: $v8m8, $v16m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s64>) = COPY $v8m8
     %1:_(<vscale x 8 x s64>) = COPY $v16m8
     %2:_(<vscale x 8 x s64>) = G_SUB %0, %1

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir
index a987d2d5011b0..5070c583c5f6b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir
@@ -1,10 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN:   -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+# RUN:   -o - | FileCheck %s
 
 ---
 name:            zext_nxv1i16_nxv1i8
@@ -14,21 +14,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv1i16_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv1i16_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv1i16_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s16>) = G_ZEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s16>)
@@ -43,21 +35,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv1i32_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv1i32_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv1i32_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = G_ZEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s32>)
@@ -72,21 +56,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv1i64_nxv1i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv1i64_nxv1i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv1i64_nxv1i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s8>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -101,21 +77,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv2i16_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv2i16_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv2i16_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = G_ZEXT %0(<vscale x 2 x s8>)
     $v8 = COPY %1(<vscale x 2 x s16>)
@@ -130,21 +98,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv2i32_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv2i32_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv2i32_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = G_ZEXT %0(<vscale x 2 x s8>)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -159,21 +119,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv2i64_nxv2i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: zext_nxv2i64_nxv2i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: zext_nxv2i64_nxv2i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s8>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -188,21 +140,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv4i16_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv4i16_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv4i16_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = G_ZEXT %0(<vscale x 4 x s8>)
     $v8 = COPY %1(<vscale x 4 x s16>)
@@ -217,21 +161,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv4i32_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: zext_nxv4i32_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: zext_nxv4i32_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s32>) = G_ZEXT %0(<vscale x 4 x s8>)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
@@ -246,21 +182,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv4i64_nxv4i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: zext_nxv4i64_nxv4i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: zext_nxv4i64_nxv4i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s8>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -275,21 +203,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv8i16_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: zext_nxv8i16_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: zext_nxv8i16_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s16>) = G_ZEXT %0(<vscale x 8 x s8>)
     $v8m2 = COPY %1(<vscale x 8 x s16>)
@@ -304,21 +224,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv8i32_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: zext_nxv8i32_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: zext_nxv8i32_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s32>) = G_ZEXT %0(<vscale x 8 x s8>)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
@@ -333,21 +245,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv8i64_nxv8i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: zext_nxv8i64_nxv8i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: zext_nxv8i64_nxv8i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s8>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
@@ -362,21 +266,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv16i16_nxv16i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: zext_nxv16i16_nxv16i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: zext_nxv16i16_nxv16i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s8>) = COPY $v8m2
     %1:_(<vscale x 16 x s16>) = G_ZEXT %0(<vscale x 16 x s8>)
     $v8m4 = COPY %1(<vscale x 16 x s16>)
@@ -391,21 +287,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv16i32_nxv16i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: zext_nxv16i32_nxv16i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: zext_nxv16i32_nxv16i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s8>) = COPY $v8m2
     %1:_(<vscale x 16 x s32>) = G_ZEXT %0(<vscale x 16 x s8>)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
@@ -420,21 +308,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv32i16_nxv32i8
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ZEXT [[COPY]](<vscale x 32 x s8>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: zext_nxv32i16_nxv32i8
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ZEXT [[COPY]](<vscale x 32 x s8>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: zext_nxv32i16_nxv32i8
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ZEXT [[COPY]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 32 x s8>) = COPY $v8m4
     %1:_(<vscale x 32 x s16>) = G_ZEXT %0(<vscale x 32 x s8>)
     $v8m8 = COPY %1(<vscale x 32 x s16>)
@@ -449,21 +329,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv1i32_nxv1i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv1i32_nxv1i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv1i32_nxv1i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s32>) = G_ZEXT %0(<vscale x 1 x s16>)
     $v8 = COPY %1(<vscale x 1 x s32>)
@@ -478,21 +350,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv1i64_nxv1i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv1i64_nxv1i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv1i64_nxv1i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s16>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -507,21 +371,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv2i32_nxv2i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv2i32_nxv2i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv2i32_nxv2i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = G_ZEXT %0(<vscale x 2 x s16>)
     $v8 = COPY %1(<vscale x 2 x s32>)
@@ -536,21 +392,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv2i64_nxv2i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: zext_nxv2i64_nxv2i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: zext_nxv2i64_nxv2i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s16>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -565,21 +413,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv4i32_nxv4i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: zext_nxv4i32_nxv4i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: zext_nxv4i32_nxv4i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s32>) = G_ZEXT %0(<vscale x 4 x s16>)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
@@ -594,21 +434,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv4i64_nxv4i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: zext_nxv4i64_nxv4i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: zext_nxv4i64_nxv4i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s16>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -623,21 +455,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv8i32_nxv8i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: zext_nxv8i32_nxv8i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: zext_nxv8i32_nxv8i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s16>) = COPY $v8m2
     %1:_(<vscale x 8 x s32>) = G_ZEXT %0(<vscale x 8 x s16>)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
@@ -652,21 +476,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv8i64_nxv8i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m4
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: zext_nxv8i64_nxv8i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m4
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: zext_nxv8i64_nxv8i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s16>) = COPY $v8m4
     %1:_(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s16>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
@@ -681,21 +497,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv16i32_nxv16i16
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s16>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: zext_nxv16i32_nxv16i16
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s16>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: zext_nxv16i32_nxv16i16
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s16>) = COPY $v8m4
     %1:_(<vscale x 16 x s32>) = G_ZEXT %0(<vscale x 16 x s16>)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
@@ -710,21 +518,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv1i64_nxv1i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s32>)
-    ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8
-    ;
-    ; RV64I-LABEL: name: zext_nxv1i64_nxv1i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s32>)
-    ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8
+    ; CHECK-LABEL: name: zext_nxv1i64_nxv1i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = COPY $v8
     %1:_(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s32>)
     $v8 = COPY %1(<vscale x 1 x s64>)
@@ -739,21 +539,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv2i64_nxv2i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s32>)
-    ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m2
-    ;
-    ; RV64I-LABEL: name: zext_nxv2i64_nxv2i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s32>)
-    ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    ; CHECK-LABEL: name: zext_nxv2i64_nxv2i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s32>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
@@ -768,21 +560,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv4i64_nxv4i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s32>)
-    ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m4
-    ;
-    ; RV64I-LABEL: name: zext_nxv4i64_nxv4i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s32>)
-    ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    ; CHECK-LABEL: name: zext_nxv4i64_nxv4i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 4 x s32>) = COPY $v8m2
     %1:_(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s32>)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
@@ -797,21 +581,13 @@ body:             |
   bb.0.entry:
     liveins: $v8
 
-    ; RV32I-LABEL: name: zext_nxv8i64_nxv8i32
-    ; RV32I: liveins: $v8
-    ; RV32I-NEXT: {{  $}}
-    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s32>)
-    ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
-    ; RV32I-NEXT: PseudoRET implicit $v8m8
-    ;
-    ; RV64I-LABEL: name: zext_nxv8i64_nxv8i32
-    ; RV64I: liveins: $v8
-    ; RV64I-NEXT: {{  $}}
-    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s32>)
-    ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
-    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    ; CHECK-LABEL: name: zext_nxv8i64_nxv8i32
+    ; CHECK: liveins: $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 8 x s32>) = COPY $v8m4
     %1:_(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s32>)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
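
For context, a minimal sketch (not part of this patch; the function name mirrors the test name for illustration) of the kind of LLVM IR that GlobalISel lowers to the G_ZEXT MIR checked above, where regbankselect assigns the scalable-vector operands to the vector bank (vrb):

    ; Illustrative IR only -- not from the patch. The IRTranslator turns
    ; this scalable-vector zext into the G_ZEXT seen in the checks above,
    ; and regbankselect places it on the vector register bank (vrb).
    define <vscale x 8 x i64> @zext_nxv8i64_nxv8i32(<vscale x 8 x i32> %v) {
      %e = zext <vscale x 8 x i32> %v to <vscale x 8 x i64>
      ret <vscale x 8 x i64> %e
    }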

More information about the llvm-commits mailing list