[llvm] ebc5feb - [RISCV] Update mir tests.

Hsiangkai Wang via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 22 18:42:45 PDT 2021


Author: Hsiangkai Wang
Date: 2021-09-23T09:42:16+08:00
New Revision: ebc5feb4ed6b1f59a000669030f9639bf1763403

URL: https://github.com/llvm/llvm-project/commit/ebc5feb4ed6b1f59a000669030f9639bf1763403
DIFF: https://github.com/llvm/llvm-project/commit/ebc5feb4ed6b1f59a000669030f9639bf1763403.diff

LOG: [RISCV] Update mir tests.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
    llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
    llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
    llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
    llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
    llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir
    llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir
    llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
    llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
    llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
index f1aad8d0b8c8e..5cd831a1d3a3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -27,32 +27,33 @@ body: |
 
     ; CHECK-LABEL: name: add_scalable_offset
     ; CHECK: liveins: $x10, $x11, $x1
-    ; CHECK: $x2 = frame-setup ADDI $x2, -2032
-    ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 2032
-    ; CHECK: SD killed $x1, $x2, 2024 :: (store (s64) into %stack.3)
-    ; CHECK: SD killed $x8, $x2, 2016 :: (store (s64) into %stack.4)
-    ; CHECK: frame-setup CFI_INSTRUCTION offset $x1, -8
-    ; CHECK: frame-setup CFI_INSTRUCTION offset $x8, -16
-    ; CHECK: $x8 = frame-setup ADDI $x2, 2032
-    ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $x8, 0
-    ; CHECK: $x2 = frame-setup ADDI $x2, -240
-    ; CHECK: $x12 = frame-setup PseudoReadVLENB
-    ; CHECK: $x2 = frame-setup SUB $x2, killed $x12
-    ; CHECK: dead $x0 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: renamable $v25 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-    ; CHECK: $x11 = PseudoReadVLENB
-    ; CHECK: $x10 = LUI 1048575
-    ; CHECK: $x10 = ADDIW killed $x10, 1824
-    ; CHECK: $x10 = ADD $x8, killed $x10
-    ; CHECK: $x10 = SUB killed $x10, killed $x11
-    ; CHECK: VS1R_V killed renamable $v25, killed renamable $x10
-    ; CHECK: $x10 = frame-destroy PseudoReadVLENB
-    ; CHECK: $x2 = frame-destroy ADD $x2, killed $x10
-    ; CHECK: $x2 = frame-destroy ADDI $x2, 240
-    ; CHECK: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
-    ; CHECK: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
-    ; CHECK: $x2 = frame-destroy ADDI $x2, 2032
-    ; CHECK: PseudoRET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x2 = frame-setup ADDI $x2, -2032
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 2032
+    ; CHECK-NEXT: SD killed $x1, $x2, 2024 :: (store (s64) into %stack.3)
+    ; CHECK-NEXT: SD killed $x8, $x2, 2016 :: (store (s64) into %stack.4)
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -8
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x8, -16
+    ; CHECK-NEXT: $x8 = frame-setup ADDI $x2, 2032
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $x8, 0
+    ; CHECK-NEXT: $x2 = frame-setup ADDI $x2, -240
+    ; CHECK-NEXT: $x12 = frame-setup PseudoReadVLENB
+    ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: renamable $v25 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+    ; CHECK-NEXT: $x11 = PseudoReadVLENB
+    ; CHECK-NEXT: $x10 = LUI 1048575
+    ; CHECK-NEXT: $x10 = ADDIW killed $x10, 1824
+    ; CHECK-NEXT: $x10 = ADD $x8, killed $x10
+    ; CHECK-NEXT: $x10 = SUB killed $x10, killed $x11
+    ; CHECK-NEXT: VS1R_V killed renamable $v25, killed renamable $x10
+    ; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
+    ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+    ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 240
+    ; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
+    ; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
+    ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 2032
+    ; CHECK-NEXT: PseudoRET
     %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
     %2:vr = PseudoVLE64_V_M1 %0, %1, 6 :: (load unknown-size from %ir.pa, align 8)

diff --git a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
index 06343928c8e55..c319ba59c2967 100644
--- a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
@@ -26,14 +26,15 @@ body:             |
     liveins: $v0, $v1, $v2
     ; CHECK-LABEL: name: commuted_op_indices
     ; CHECK: liveins: $v0, $v1, $v2
-    ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v0
-    ; CHECK: [[COPY1:%[0-9]+]]:vrnov0 = COPY $v1
-    ; CHECK: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v2
-    ; CHECK: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6, 1, implicit $vl, implicit $vtype
-    ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVNMSUB_VV_M1_]]
-    ; CHECK: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: $v0 = COPY [[PseudoVNMSUB_VV_M1_]]
-    ; CHECK: PseudoRET implicit $v0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrnov0 = COPY $v1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v2
+    ; CHECK-NEXT: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6, 1, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVNMSUB_VV_M1_]]
+    ; CHECK-NEXT: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v0 = COPY [[PseudoVNMSUB_VV_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v0
     %0:vr = COPY $v0
     %1:vrnov0 = COPY $v1
     %2:vrnov0 = COPY $v2

diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 7ac03dc37ad72..98032fb998538 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -50,130 +50,134 @@ stack:
 body:             |
   ; CHECK-LABEL: name: spillslot
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: $x12, $x1, $x9, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27
-  ; CHECK:   $x2 = frame-setup ADDI $x2, -2032
-  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 2032
-  ; CHECK:   SD killed $x1, $x2, 2024 :: (store (s64) into %stack.3)
-  ; CHECK:   SD killed $x8, $x2, 2016 :: (store (s64) into %stack.4)
-  ; CHECK:   SD killed $x9, $x2, 2008 :: (store (s64) into %stack.5)
-  ; CHECK:   SD killed $x18, $x2, 2000 :: (store (s64) into %stack.6)
-  ; CHECK:   SD killed $x19, $x2, 1992 :: (store (s64) into %stack.7)
-  ; CHECK:   SD killed $x20, $x2, 1984 :: (store (s64) into %stack.8)
-  ; CHECK:   SD killed $x21, $x2, 1976 :: (store (s64) into %stack.9)
-  ; CHECK:   SD killed $x22, $x2, 1968 :: (store (s64) into %stack.10)
-  ; CHECK:   SD killed $x23, $x2, 1960 :: (store (s64) into %stack.11)
-  ; CHECK:   SD killed $x24, $x2, 1952 :: (store (s64) into %stack.12)
-  ; CHECK:   SD killed $x25, $x2, 1944 :: (store (s64) into %stack.13)
-  ; CHECK:   SD killed $x26, $x2, 1936 :: (store (s64) into %stack.14)
-  ; CHECK:   SD killed $x27, $x2, 1928 :: (store (s64) into %stack.15)
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x1, -8
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x8, -16
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x9, -24
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x18, -32
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x19, -40
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x20, -48
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x21, -56
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x22, -64
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x23, -72
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x24, -80
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x25, -88
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x26, -96
-  ; CHECK:   frame-setup CFI_INSTRUCTION offset $x27, -104
-  ; CHECK:   $x8 = frame-setup ADDI $x2, 2032
-  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa $x8, 0
-  ; CHECK:   $x2 = frame-setup ADDI $x2, -272
-  ; CHECK:   $x10 = frame-setup PseudoReadVLENB
-  ; CHECK:   $x11 = frame-setup ADDI $x0, 51
-  ; CHECK:   $x10 = frame-setup MUL killed $x10, killed $x11
-  ; CHECK:   $x2 = frame-setup SUB $x2, killed $x10
-  ; CHECK:   $x2 = frame-setup ANDI $x2, -128
-  ; CHECK:   dead renamable $x15 = PseudoVSETIVLI 1, 72, implicit-def $vl, implicit-def $vtype
-  ; CHECK:   renamable $v25 = PseudoVMV_V_X_M1 killed renamable $x12, $noreg, 16, implicit $vl, implicit $vtype
-  ; CHECK:   $x11 = PseudoReadVLENB
-  ; CHECK:   $x10 = ADDI $x0, 50
-  ; CHECK:   $x11 = MUL killed $x11, killed $x10
-  ; CHECK:   $x10 = LUI 1
-  ; CHECK:   $x10 = ADDIW killed $x10, -1896
-  ; CHECK:   $x10 = ADD $x2, killed $x10
-  ; CHECK:   $x10 = ADD killed $x10, killed $x11
-  ; CHECK:   PseudoVSPILL_M1 killed renamable $v25, killed $x10 :: (store unknown-size into %stack.1, align 8)
-  ; CHECK:   renamable $x1 = ADDI $x0, 255
-  ; CHECK:   renamable $x5 = nuw ADDI $x2, 384
-  ; CHECK:   renamable $x6 = ADDI $x2, 512
-  ; CHECK:   renamable $x7 = nuw ADDI $x2, 640
-  ; CHECK:   renamable $x10 = ADDI $x0, 128
-  ; CHECK:   renamable $x12 = nuw ADDI $x2, 256
-  ; CHECK:   renamable $x14 = COPY $x0
-  ; CHECK:   renamable $x17 = nuw ADDI $x2, 256
-  ; CHECK:   renamable $x18 = ADDI $x2, 1280
-  ; CHECK:   renamable $x19 = ADDI $x2, 1408
-  ; CHECK:   renamable $x20 = ADDI $x2, 1536
-  ; CHECK:   renamable $x21 = ADDI $x2, 1664
-  ; CHECK:   renamable $x22 = ADDI $x2, 1792
-  ; CHECK:   renamable $x23 = ADDI $x2, 1920
-  ; CHECK:   SD killed $x1, $x2, 8 :: (store (s64) into %stack.16)
-  ; CHECK:   SD killed $x5, $x2, 0 :: (store (s64) into %stack.17)
-  ; CHECK:   $x11 = LUI 1
-  ; CHECK:   $x11 = ADDIW killed $x11, -2048
-  ; CHECK:   $x24 = ADD $x2, killed $x11
-  ; CHECK:   renamable $x25 = ADDI $x2, 128
-  ; CHECK:   renamable $x26 = ADDI $x2, 128
-  ; CHECK:   renamable $x27 = ADDI $x0, 2
-  ; CHECK:   renamable $x28 = ADDI $x2, 768
-  ; CHECK:   renamable $x29 = ADDI $x2, 896
-  ; CHECK:   renamable $x30 = ADDI $x2, 1024
-  ; CHECK:   renamable $x31 = nuw ADDI $x2, 1152
-  ; CHECK:   renamable $x15 = ADDIW renamable $x14, 0
-  ; CHECK:   renamable $x11 = ANDI renamable $x15, 255
-  ; CHECK:   renamable $x13 = SLLI renamable $x11, 3
-  ; CHECK:   renamable $x13 = ADD renamable $x26, killed renamable $x13
-  ; CHECK:   renamable $x13 = LD killed renamable $x13, 0 :: (load (s64))
-  ; CHECK:   renamable $x9 = SRAI renamable $x13, 63
-  ; CHECK:   renamable $x9 = SRLI killed renamable $x9, 62
-  ; CHECK:   renamable $x9 = ADD renamable $x13, killed renamable $x9
-  ; CHECK:   renamable $x9 = ANDI killed renamable $x9, -4
-  ; CHECK:   renamable $x16 = SUB killed renamable $x13, renamable $x9
-  ; CHECK:   dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
-  ; CHECK:   renamable $x13 = nsw ADDI renamable $x16, -2
-  ; CHECK:   $x5 = PseudoReadVLENB
-  ; CHECK:   $x1 = ADDI $x0, 50
-  ; CHECK:   $x5 = MUL killed $x5, killed $x1
-  ; CHECK:   $x1 = LUI 1
-  ; CHECK:   $x1 = ADDIW killed $x1, -1896
-  ; CHECK:   $x1 = ADD $x2, killed $x1
-  ; CHECK:   $x1 = ADD killed $x1, killed $x5
-  ; CHECK:   $x5 = LD $x2, 0 :: (load (s64) from %stack.17)
-  ; CHECK:   renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8)
-  ; CHECK:   $x1 = LD $x2, 8 :: (load (s64) from %stack.16)
-  ; CHECK:   renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 8, implicit $vl, implicit $vtype
-  ; CHECK:   renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 8, implicit $vl, implicit $vtype
-  ; CHECK:   BLT killed renamable $x16, renamable $x27, %bb.2
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   liveins: $x1, $x5, $x6, $x7, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31
-  ; CHECK:   renamable $x9 = COPY killed renamable $x13
-  ; CHECK:   PseudoBR %bb.2
-  ; CHECK: bb.2:
-  ; CHECK:   $x10 = frame-destroy LUI 1
-  ; CHECK:   $x10 = frame-destroy ADDIW killed $x10, -1792
-  ; CHECK:   $x2 = frame-destroy SUB $x8, killed $x10
-  ; CHECK:   $x2 = frame-destroy ADDI $x2, 272
-  ; CHECK:   $x27 = LD $x2, 1928 :: (load (s64) from %stack.15)
-  ; CHECK:   $x26 = LD $x2, 1936 :: (load (s64) from %stack.14)
-  ; CHECK:   $x25 = LD $x2, 1944 :: (load (s64) from %stack.13)
-  ; CHECK:   $x24 = LD $x2, 1952 :: (load (s64) from %stack.12)
-  ; CHECK:   $x23 = LD $x2, 1960 :: (load (s64) from %stack.11)
-  ; CHECK:   $x22 = LD $x2, 1968 :: (load (s64) from %stack.10)
-  ; CHECK:   $x21 = LD $x2, 1976 :: (load (s64) from %stack.9)
-  ; CHECK:   $x20 = LD $x2, 1984 :: (load (s64) from %stack.8)
-  ; CHECK:   $x19 = LD $x2, 1992 :: (load (s64) from %stack.7)
-  ; CHECK:   $x18 = LD $x2, 2000 :: (load (s64) from %stack.6)
-  ; CHECK:   $x9 = LD $x2, 2008 :: (load (s64) from %stack.5)
-  ; CHECK:   $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
-  ; CHECK:   $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
-  ; CHECK:   $x2 = frame-destroy ADDI $x2, 2032
-  ; CHECK:   PseudoRET
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $x12, $x1, $x9, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   $x2 = frame-setup ADDI $x2, -2032
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 2032
+  ; CHECK-NEXT:   SD killed $x1, $x2, 2024 :: (store (s64) into %stack.3)
+  ; CHECK-NEXT:   SD killed $x8, $x2, 2016 :: (store (s64) into %stack.4)
+  ; CHECK-NEXT:   SD killed $x9, $x2, 2008 :: (store (s64) into %stack.5)
+  ; CHECK-NEXT:   SD killed $x18, $x2, 2000 :: (store (s64) into %stack.6)
+  ; CHECK-NEXT:   SD killed $x19, $x2, 1992 :: (store (s64) into %stack.7)
+  ; CHECK-NEXT:   SD killed $x20, $x2, 1984 :: (store (s64) into %stack.8)
+  ; CHECK-NEXT:   SD killed $x21, $x2, 1976 :: (store (s64) into %stack.9)
+  ; CHECK-NEXT:   SD killed $x22, $x2, 1968 :: (store (s64) into %stack.10)
+  ; CHECK-NEXT:   SD killed $x23, $x2, 1960 :: (store (s64) into %stack.11)
+  ; CHECK-NEXT:   SD killed $x24, $x2, 1952 :: (store (s64) into %stack.12)
+  ; CHECK-NEXT:   SD killed $x25, $x2, 1944 :: (store (s64) into %stack.13)
+  ; CHECK-NEXT:   SD killed $x26, $x2, 1936 :: (store (s64) into %stack.14)
+  ; CHECK-NEXT:   SD killed $x27, $x2, 1928 :: (store (s64) into %stack.15)
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x1, -8
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x8, -16
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x9, -24
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x18, -32
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x19, -40
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x20, -48
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x21, -56
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x22, -64
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x23, -72
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x24, -80
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x25, -88
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x26, -96
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x27, -104
+  ; CHECK-NEXT:   $x8 = frame-setup ADDI $x2, 2032
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa $x8, 0
+  ; CHECK-NEXT:   $x2 = frame-setup ADDI $x2, -272
+  ; CHECK-NEXT:   $x10 = frame-setup PseudoReadVLENB
+  ; CHECK-NEXT:   $x11 = frame-setup ADDI $x0, 51
+  ; CHECK-NEXT:   $x10 = frame-setup MUL killed $x10, killed $x11
+  ; CHECK-NEXT:   $x2 = frame-setup SUB $x2, killed $x10
+  ; CHECK-NEXT:   $x2 = frame-setup ANDI $x2, -128
+  ; CHECK-NEXT:   dead renamable $x15 = PseudoVSETIVLI 1, 72, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   renamable $v25 = PseudoVMV_V_X_M1 killed renamable $x12, $noreg, 16, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   $x11 = PseudoReadVLENB
+  ; CHECK-NEXT:   $x10 = ADDI $x0, 50
+  ; CHECK-NEXT:   $x11 = MUL killed $x11, killed $x10
+  ; CHECK-NEXT:   $x10 = LUI 1
+  ; CHECK-NEXT:   $x10 = ADDIW killed $x10, -1896
+  ; CHECK-NEXT:   $x10 = ADD $x2, killed $x10
+  ; CHECK-NEXT:   $x10 = ADD killed $x10, killed $x11
+  ; CHECK-NEXT:   PseudoVSPILL_M1 killed renamable $v25, killed $x10 :: (store unknown-size into %stack.1, align 8)
+  ; CHECK-NEXT:   renamable $x1 = ADDI $x0, 255
+  ; CHECK-NEXT:   renamable $x5 = nuw ADDI $x2, 384
+  ; CHECK-NEXT:   renamable $x6 = ADDI $x2, 512
+  ; CHECK-NEXT:   renamable $x7 = nuw ADDI $x2, 640
+  ; CHECK-NEXT:   renamable $x10 = ADDI $x0, 128
+  ; CHECK-NEXT:   renamable $x12 = nuw ADDI $x2, 256
+  ; CHECK-NEXT:   renamable $x14 = COPY $x0
+  ; CHECK-NEXT:   renamable $x17 = nuw ADDI $x2, 256
+  ; CHECK-NEXT:   renamable $x18 = ADDI $x2, 1280
+  ; CHECK-NEXT:   renamable $x19 = ADDI $x2, 1408
+  ; CHECK-NEXT:   renamable $x20 = ADDI $x2, 1536
+  ; CHECK-NEXT:   renamable $x21 = ADDI $x2, 1664
+  ; CHECK-NEXT:   renamable $x22 = ADDI $x2, 1792
+  ; CHECK-NEXT:   renamable $x23 = ADDI $x2, 1920
+  ; CHECK-NEXT:   SD killed $x1, $x2, 8 :: (store (s64) into %stack.16)
+  ; CHECK-NEXT:   SD killed $x5, $x2, 0 :: (store (s64) into %stack.17)
+  ; CHECK-NEXT:   $x11 = LUI 1
+  ; CHECK-NEXT:   $x11 = ADDIW killed $x11, -2048
+  ; CHECK-NEXT:   $x24 = ADD $x2, killed $x11
+  ; CHECK-NEXT:   renamable $x25 = ADDI $x2, 128
+  ; CHECK-NEXT:   renamable $x26 = ADDI $x2, 128
+  ; CHECK-NEXT:   renamable $x27 = ADDI $x0, 2
+  ; CHECK-NEXT:   renamable $x28 = ADDI $x2, 768
+  ; CHECK-NEXT:   renamable $x29 = ADDI $x2, 896
+  ; CHECK-NEXT:   renamable $x30 = ADDI $x2, 1024
+  ; CHECK-NEXT:   renamable $x31 = nuw ADDI $x2, 1152
+  ; CHECK-NEXT:   renamable $x15 = ADDIW renamable $x14, 0
+  ; CHECK-NEXT:   renamable $x11 = ANDI renamable $x15, 255
+  ; CHECK-NEXT:   renamable $x13 = SLLI renamable $x11, 3
+  ; CHECK-NEXT:   renamable $x13 = ADD renamable $x26, killed renamable $x13
+  ; CHECK-NEXT:   renamable $x13 = LD killed renamable $x13, 0 :: (load (s64))
+  ; CHECK-NEXT:   renamable $x9 = SRAI renamable $x13, 63
+  ; CHECK-NEXT:   renamable $x9 = SRLI killed renamable $x9, 62
+  ; CHECK-NEXT:   renamable $x9 = ADD renamable $x13, killed renamable $x9
+  ; CHECK-NEXT:   renamable $x9 = ANDI killed renamable $x9, -4
+  ; CHECK-NEXT:   renamable $x16 = SUB killed renamable $x13, renamable $x9
+  ; CHECK-NEXT:   dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   renamable $x13 = nsw ADDI renamable $x16, -2
+  ; CHECK-NEXT:   $x5 = PseudoReadVLENB
+  ; CHECK-NEXT:   $x1 = ADDI $x0, 50
+  ; CHECK-NEXT:   $x5 = MUL killed $x5, killed $x1
+  ; CHECK-NEXT:   $x1 = LUI 1
+  ; CHECK-NEXT:   $x1 = ADDIW killed $x1, -1896
+  ; CHECK-NEXT:   $x1 = ADD $x2, killed $x1
+  ; CHECK-NEXT:   $x1 = ADD killed $x1, killed $x5
+  ; CHECK-NEXT:   $x5 = LD $x2, 0 :: (load (s64) from %stack.17)
+  ; CHECK-NEXT:   renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8)
+  ; CHECK-NEXT:   $x1 = LD $x2, 8 :: (load (s64) from %stack.16)
+  ; CHECK-NEXT:   renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 8, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 8, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   BLT killed renamable $x16, renamable $x27, %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $x1, $x5, $x6, $x7, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   renamable $x9 = COPY killed renamable $x13
+  ; CHECK-NEXT:   PseudoBR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   $x10 = frame-destroy LUI 1
+  ; CHECK-NEXT:   $x10 = frame-destroy ADDIW killed $x10, -1792
+  ; CHECK-NEXT:   $x2 = frame-destroy SUB $x8, killed $x10
+  ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 272
+  ; CHECK-NEXT:   $x27 = LD $x2, 1928 :: (load (s64) from %stack.15)
+  ; CHECK-NEXT:   $x26 = LD $x2, 1936 :: (load (s64) from %stack.14)
+  ; CHECK-NEXT:   $x25 = LD $x2, 1944 :: (load (s64) from %stack.13)
+  ; CHECK-NEXT:   $x24 = LD $x2, 1952 :: (load (s64) from %stack.12)
+  ; CHECK-NEXT:   $x23 = LD $x2, 1960 :: (load (s64) from %stack.11)
+  ; CHECK-NEXT:   $x22 = LD $x2, 1968 :: (load (s64) from %stack.10)
+  ; CHECK-NEXT:   $x21 = LD $x2, 1976 :: (load (s64) from %stack.9)
+  ; CHECK-NEXT:   $x20 = LD $x2, 1984 :: (load (s64) from %stack.8)
+  ; CHECK-NEXT:   $x19 = LD $x2, 1992 :: (load (s64) from %stack.7)
+  ; CHECK-NEXT:   $x18 = LD $x2, 2000 :: (load (s64) from %stack.6)
+  ; CHECK-NEXT:   $x9 = LD $x2, 2008 :: (load (s64) from %stack.5)
+  ; CHECK-NEXT:   $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
+  ; CHECK-NEXT:   $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
+  ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 2032
+  ; CHECK-NEXT:   PseudoRET
   bb.0:
     successors: %bb.1, %bb.2
     liveins: $x12

diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
index 3895cbfba97d4..31614ed25bbe1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
@@ -13,12 +13,13 @@ declare void @llvm.riscv.vse.nxv1i64(
 define i64 @test(<vscale x 1 x i64> %0) nounwind {
   ; CHECK-LABEL: name: test
   ; CHECK: bb.0.entry:
-  ; CHECK:   liveins: $v8
-  ; CHECK:   [[COPY:%[0-9]+]]:vr = COPY $v8
-  ; CHECK:   PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 6
-  ; CHECK:   [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load (s64) from %ir.a)
-  ; CHECK:   $x10 = COPY [[LD]]
-  ; CHECK:   PseudoRET implicit $x10
+  ; CHECK-NEXT:   liveins: $v8
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 6
+  ; CHECK-NEXT:   [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load (s64) from %ir.a)
+  ; CHECK-NEXT:   $x10 = COPY [[LD]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
   %a = alloca i64
   %b = bitcast i64* %a to <vscale x 1 x i64>*

diff --git a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
index b8fdb0e6fb146..8da8785956668 100644
--- a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
@@ -24,16 +24,18 @@ stack:
 body: |
   ; CHECK-LABEL: name: foo
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 0
-  ; CHECK:   $x10 = frame-setup PseudoReadVLENB
-  ; CHECK:   $x10 = frame-setup SLLI killed $x10, 1
-  ; CHECK:   $x2 = frame-setup SUB $x2, killed $x10
-  ; CHECK: bb.1:
-  ; CHECK:   $x10 = frame-destroy PseudoReadVLENB
-  ; CHECK:   $x10 = frame-destroy SLLI killed $x10, 1
-  ; CHECK:   $x2 = frame-destroy ADD $x2, killed $x10
-  ; CHECK:   PseudoRET
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 0
+  ; CHECK-NEXT:   $x10 = frame-setup PseudoReadVLENB
+  ; CHECK-NEXT:   $x10 = frame-setup SLLI killed $x10, 1
+  ; CHECK-NEXT:   $x2 = frame-setup SUB $x2, killed $x10
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   $x10 = frame-destroy PseudoReadVLENB
+  ; CHECK-NEXT:   $x10 = frame-destroy SLLI killed $x10, 1
+  ; CHECK-NEXT:   $x2 = frame-destroy ADD $x2, killed $x10
+  ; CHECK-NEXT:   PseudoRET
   bb.0:
   bb.1:
     PseudoRET

diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir
index 1024b31ecf827..74b3c06831114 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir
@@ -15,12 +15,13 @@ body:             |
     liveins: $v0, $v1, $v2, $v3
     ; CHECK-LABEL: name: mask_reg_alloc
     ; CHECK: liveins: $v0, $v1, $v2, $v3
-    ; CHECK: dead $x0 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
-    ; CHECK: renamable $v25 = PseudoVMERGE_VIM_M1 killed renamable $v2, 1, killed renamable $v0, 1, 3, implicit $vl, implicit $vtype
-    ; CHECK: renamable $v0 = COPY killed renamable $v1
-    ; CHECK: renamable $v26 = PseudoVMERGE_VIM_M1 killed renamable $v3, 1, killed renamable $v0, 1, 3, implicit $vl, implicit $vtype
-    ; CHECK: renamable $v0 = PseudoVADD_VV_M1 killed renamable $v25, killed renamable $v26, 1, 3, implicit $vl, implicit $vtype
-    ; CHECK: PseudoRET implicit $v0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: renamable $v25 = PseudoVMERGE_VIM_M1 killed renamable $v2, 1, killed renamable $v0, 1, 3, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: renamable $v0 = COPY killed renamable $v1
+    ; CHECK-NEXT: renamable $v26 = PseudoVMERGE_VIM_M1 killed renamable $v3, 1, killed renamable $v0, 1, 3, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: renamable $v0 = PseudoVADD_VV_M1 killed renamable $v25, killed renamable $v26, 1, 3, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoRET implicit $v0
     %0:vr = COPY $v0
     %1:vr = COPY $v1
     %2:vr = COPY $v2

diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir
index 8db36981fd5a1..504dafc50d8e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir
@@ -9,12 +9,13 @@ body:             |
     liveins: $x10
     ; CHECK-LABEL: name: test_earlyclobber
     ; CHECK: liveins: $x10
-    ; CHECK: undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5
-    ; CHECK: %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5
-    ; CHECK: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 $x10, 1, 5
-    ; CHECK: undef early-clobber %2.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 %0.sub_vrm2_0, 0, 1, 5, implicit $vl, implicit $vtype
-    ; CHECK: %2.sub_vrm2_1:vrn2m2 = COPY %0.sub_vrm2_1
-    ; CHECK: PseudoVSUXSEG2EI32_V_M2_M2 %2, $x10, [[PseudoVLE32_V_M2_]], 1, 5, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5
+    ; CHECK-NEXT: %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 $x10, 1, 5
+    ; CHECK-NEXT: undef early-clobber %2.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 %0.sub_vrm2_0, 0, 1, 5, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: %2.sub_vrm2_1:vrn2m2 = COPY %0.sub_vrm2_1
+    ; CHECK-NEXT: PseudoVSUXSEG2EI32_V_M2_M2 %2, $x10, [[PseudoVLE32_V_M2_]], 1, 5, implicit $vl, implicit $vtype
     undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5
     %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5
     %1:vrm2 = PseudoVLE32_V_M2 $x10, 1, 5

diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
index 93ed36abdd48d..d32c4c0ad68eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
@@ -47,14 +47,15 @@ body:             |
 
     ; CHECK-LABEL: name: masked_load_nxv8i64
     ; CHECK: liveins: $x10, $v0
-    ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK: $v0 = COPY [[COPY]]
-    ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
-    ; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
-    ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
-    ; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
-    ; CHECK: PseudoRET implicit $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: $v0 = COPY [[COPY]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
+    ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
     %1:vr = COPY $v0
     %0:gpr = COPY $x10
     $v0 = COPY %1

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index da78be7b4b3ae..498307b31ac14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -144,28 +144,34 @@ machineFunctionInfo: {}
 body:             |
   ; CHECK-LABEL: name: load_add_or_sub
   ; CHECK: bb.0.entry:
-  ; CHECK:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
-  ; CHECK:   liveins: $x10, $x11, $v8, $x12
-  ; CHECK:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
-  ; CHECK:   [[COPY1:%[0-9]+]]:vr = COPY $v8
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
-  ; CHECK:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK:   dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-  ; CHECK:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
-  ; CHECK:   BEQ [[COPY3]], [[COPY4]], %bb.2
-  ; CHECK:   PseudoBR %bb.1
-  ; CHECK: bb.1.if.then:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK:   PseudoBR %bb.3
-  ; CHECK: bb.2.if.else:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK: bb.3.if.end:
-  ; CHECK:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
-  ; CHECK:   $v8 = COPY [[PHI]]
-  ; CHECK:   PseudoRET implicit $v8
+  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
+  ; CHECK-NEXT:   liveins: $x10, $x11, $v8, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
+  ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
+  ; CHECK-NEXT:   PseudoBR %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1.if.then:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoBR %bb.3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2.if.else:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3.if.end:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
+  ; CHECK-NEXT:   $v8 = COPY [[PHI]]
+  ; CHECK-NEXT:   PseudoRET implicit $v8
   bb.0.entry:
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $x11, $v8, $x12
@@ -217,30 +223,36 @@ machineFunctionInfo: {}
 body:             |
   ; CHECK-LABEL: name: load_zext_or_sext
   ; CHECK: bb.0.entry:
-  ; CHECK:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
-  ; CHECK:   liveins: $x10, $x11, $x12, $x13
-  ; CHECK:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x13
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
-  ; CHECK:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK:   dead $x0 = PseudoVSETVLI [[COPY]], 87, implicit-def $vl, implicit-def $vtype
-  ; CHECK:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY2]], $noreg, 5, implicit $vl, implicit $vtype
-  ; CHECK:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
-  ; CHECK:   BEQ [[COPY3]], [[COPY4]], %bb.2
-  ; CHECK:   PseudoBR %bb.1
-  ; CHECK: bb.1.if.then:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK:   early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK:   PseudoBR %bb.3
-  ; CHECK: bb.2.if.else:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK:   early-clobber %2:vr = PseudoVSEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK: bb.3.if.end:
-  ; CHECK:   [[PHI:%[0-9]+]]:vr = PHI %1, %bb.1, %2, %bb.2
-  ; CHECK:   PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK:   PseudoRET
+  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12, $x13
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x13
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 87, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY2]], $noreg, 5, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
+  ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
+  ; CHECK-NEXT:   PseudoBR %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1.if.then:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
+  ; CHECK-NEXT:   early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoBR %bb.3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2.if.else:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
+  ; CHECK-NEXT:   early-clobber %2:vr = PseudoVSEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3.if.end:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI %1, %bb.1, %2, %bb.2
+  ; CHECK-NEXT:   PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoRET
   bb.0.entry:
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $x11, $x12, $x13
@@ -292,29 +304,35 @@ machineFunctionInfo: {}
 body:             |
   ; CHECK-LABEL: name: vmv_x_s
   ; CHECK: bb.0.entry:
-  ; CHECK:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
-  ; CHECK:   liveins: $x10, $v8, $v9, $x11
-  ; CHECK:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK:   [[COPY1:%[0-9]+]]:vr = COPY $v9
-  ; CHECK:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
-  ; CHECK:   BEQ [[COPY3]], [[COPY4]], %bb.2
-  ; CHECK:   PseudoBR %bb.1
-  ; CHECK: bb.1.if.then:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-  ; CHECK:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK:   PseudoBR %bb.3
-  ; CHECK: bb.2.if.else:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-  ; CHECK:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY1]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK: bb.3.if.end:
-  ; CHECK:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
-  ; CHECK:   [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[PHI]], 6, implicit $vtype
-  ; CHECK:   $x10 = COPY [[PseudoVMV_X_S_M1_]]
-  ; CHECK:   PseudoRET implicit $x10
+  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
+  ; CHECK-NEXT:   liveins: $x10, $v8, $v9, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v9
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
+  ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
+  ; CHECK-NEXT:   PseudoBR %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1.if.then:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoBR %bb.3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2.if.else:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY1]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3.if.end:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
+  ; CHECK-NEXT:   [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[PHI]], 6, implicit $vtype
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVMV_X_S_M1_]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
   bb.0.entry:
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $v8, $v9, $x11
@@ -366,27 +384,33 @@ machineFunctionInfo: {}
 body:             |
   ; CHECK-LABEL: name: vsetvli_add_or_sub
   ; CHECK: bb.0.entry:
-  ; CHECK:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
-  ; CHECK:   liveins: $x10, $v8, $v9, $x11
-  ; CHECK:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK:   [[COPY1:%[0-9]+]]:vr = COPY $v9
-  ; CHECK:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK:   [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-  ; CHECK:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
-  ; CHECK:   BEQ [[COPY3]], [[COPY4]], %bb.2
-  ; CHECK:   PseudoBR %bb.1
-  ; CHECK: bb.1.if.then:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK:   PseudoBR %bb.3
-  ; CHECK: bb.2.if.else:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-  ; CHECK: bb.3.if.end:
-  ; CHECK:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
-  ; CHECK:   $v8 = COPY [[PHI]]
-  ; CHECK:   PseudoRET implicit $v8
+  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
+  ; CHECK-NEXT:   liveins: $x10, $v8, $v9, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v9
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
+  ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
+  ; CHECK-NEXT:   PseudoBR %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1.if.then:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoBR %bb.3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2.if.else:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3.if.end:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
+  ; CHECK-NEXT:   $v8 = COPY [[PHI]]
+  ; CHECK-NEXT:   PseudoRET implicit $v8
   bb.0.entry:
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $v8, $v9, $x11

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index 4fda0ad095cac..719a4844152a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -113,13 +113,14 @@ body:             |
 
     ; CHECK-LABEL: name: add
     ; CHECK: liveins: $v8, $v9, $x10
-    ; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
-    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
-    ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
-    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
-    ; CHECK: PseudoRET implicit $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x10
     %1:vr = COPY $v9
     %0:vr = COPY $v8
@@ -151,14 +152,15 @@ body:             |
 
     ; CHECK-LABEL: name: load_add
     ; CHECK: liveins: $x10, $v8, $x11
-    ; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v8
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
-    ; CHECK: PseudoRET implicit $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x11
     %1:vr = COPY $v8
     %0:gpr = COPY $x10
@@ -189,14 +191,15 @@ body:             |
 
     ; CHECK-LABEL: name: load_zext
     ; CHECK: liveins: $x10, $x11
-    ; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 87, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5, implicit $vl, implicit $vtype
-    ; CHECK: dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
-    ; CHECK: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: $v8 = COPY %3
-    ; CHECK: PseudoRET implicit $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 87, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
+    ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = COPY %3
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
     %2:vr = PseudoVLE32_V_MF2 %0, %1, 5
@@ -223,11 +226,12 @@ body:             |
 
     ; CHECK-LABEL: name: vmv_x_s
     ; CHECK: liveins: $v8
-    ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v8
-    ; CHECK: dead $x0 = PseudoVSETIVLI 0, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6, implicit $vtype
-    ; CHECK: $x10 = COPY [[PseudoVMV_X_S_M1_]]
-    ; CHECK: PseudoRET implicit $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6, implicit $vtype
+    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
     %0:vr = COPY $v8
     %1:gpr = PseudoVMV_X_S_M1 %0, 6
     $x10 = COPY %1
@@ -256,14 +260,15 @@ body:             |
 
     ; CHECK-LABEL: name: add_v2i64
     ; CHECK: liveins: $x10, $x11
-    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
-    ; CHECK: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
-    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6, implicit $vl, implicit $vtype
-    ; CHECK: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
-    ; CHECK: PseudoRET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
+    ; CHECK-NEXT: PseudoRET
     %1:gpr = COPY $x11
     %0:gpr = COPY $x10
     %2:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
@@ -295,17 +300,18 @@ body:             |
 
     ; CHECK-LABEL: name: vreduce_add_v2i64
     ; CHECK: liveins: $x10
-    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
-    ; CHECK: dead %6:gpr = PseudoVSETVLIX0 $x0, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6, implicit $vl, implicit $vtype
-    ; CHECK: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6, implicit $vl, implicit $vtype
-    ; CHECK: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6, implicit $vtype
-    ; CHECK: $x10 = COPY [[PseudoVMV_X_S_M1_]]
-    ; CHECK: PseudoRET implicit $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
+    ; CHECK-NEXT: dead %6:gpr = PseudoVSETVLIX0 $x0, 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6, implicit $vtype
+    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
     %0:gpr = COPY $x10
     %1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
     %2:vr = PseudoVMV_V_I_M1 0, -1, 6
@@ -339,13 +345,14 @@ body:             |
 
     ; CHECK-LABEL: name: vsetvli_add
     ; CHECK: liveins: $v8, $v9, $x10
-    ; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
-    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
-    ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
-    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
-    ; CHECK: PseudoRET implicit $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
+    ; CHECK-NEXT: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x10
     %1:vr = COPY $v9
     %0:vr = COPY $v8
@@ -378,16 +385,17 @@ body:             |
 
     ; CHECK-LABEL: name: load_add_inlineasm
     ; CHECK: liveins: $x10, $v8, $x11
-    ; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v8
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
-    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
-    ; CHECK: PseudoRET implicit $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x11
     %1:vr = COPY $v8
     %0:gpr = COPY $x10

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
index 234a54ba74967..2bc83469896cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
@@ -8,29 +8,29 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N2
     ; CHECK: $v2 = PseudoVMV1R_V $v4
-    ; CHECK: $v3 = PseudoVMV1R_V $v5
-    ; CHECK: $v3 = PseudoVMV1R_V $v4
-    ; CHECK: $v4 = PseudoVMV1R_V $v5
-    ; CHECK: $v6 = PseudoVMV1R_V $v5
-    ; CHECK: $v5 = PseudoVMV1R_V $v4
-    ; CHECK: $v6 = PseudoVMV1R_V $v4
-    ; CHECK: $v7 = PseudoVMV1R_V $v5
-    ; CHECK: $v0m2 = PseudoVMV2R_V $v4m2
-    ; CHECK: $v2m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v2m2 = PseudoVMV2R_V $v4m2
-    ; CHECK: $v4m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v8m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v6m2 = PseudoVMV2R_V $v4m2
-    ; CHECK: $v8m2 = PseudoVMV2R_V $v4m2
-    ; CHECK: $v10m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v0m4 = PseudoVMV4R_V $v8m4
-    ; CHECK: $v4m4 = PseudoVMV4R_V $v12m4
-    ; CHECK: $v4m4 = PseudoVMV4R_V $v8m4
-    ; CHECK: $v8m4 = PseudoVMV4R_V $v12m4
-    ; CHECK: $v16m4 = PseudoVMV4R_V $v12m4
-    ; CHECK: $v12m4 = PseudoVMV4R_V $v8m4
-    ; CHECK: $v16m4 = PseudoVMV4R_V $v8m4
-    ; CHECK: $v20m4 = PseudoVMV4R_V $v12m4
+    ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v4
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v4
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v4
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v0m2 = PseudoVMV2R_V $v4m2
+    ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v4m2
+    ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v4m2
+    ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v4m2
+    ; CHECK-NEXT: $v10m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v0m4 = PseudoVMV4R_V $v8m4
+    ; CHECK-NEXT: $v4m4 = PseudoVMV4R_V $v12m4
+    ; CHECK-NEXT: $v4m4 = PseudoVMV4R_V $v8m4
+    ; CHECK-NEXT: $v8m4 = PseudoVMV4R_V $v12m4
+    ; CHECK-NEXT: $v16m4 = PseudoVMV4R_V $v12m4
+    ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v8m4
+    ; CHECK-NEXT: $v16m4 = PseudoVMV4R_V $v8m4
+    ; CHECK-NEXT: $v20m4 = PseudoVMV4R_V $v12m4
     $v2_v3 = COPY $v4_v5
     $v3_v4 = COPY $v4_v5
     $v5_v6 = COPY $v4_v5
@@ -52,32 +52,32 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N3
     ; CHECK: $v2 = PseudoVMV1R_V $v5
-    ; CHECK: $v3 = PseudoVMV1R_V $v6
-    ; CHECK: $v4 = PseudoVMV1R_V $v7
-    ; CHECK: $v3 = PseudoVMV1R_V $v5
-    ; CHECK: $v4 = PseudoVMV1R_V $v6
-    ; CHECK: $v5 = PseudoVMV1R_V $v7
-    ; CHECK: $v4 = PseudoVMV1R_V $v5
-    ; CHECK: $v5 = PseudoVMV1R_V $v6
-    ; CHECK: $v6 = PseudoVMV1R_V $v7
-    ; CHECK: $v9 = PseudoVMV1R_V $v7
-    ; CHECK: $v8 = PseudoVMV1R_V $v6
-    ; CHECK: $v7 = PseudoVMV1R_V $v5
-    ; CHECK: $v9 = PseudoVMV1R_V $v5
-    ; CHECK: $v10 = PseudoVMV1R_V $v6
-    ; CHECK: $v11 = PseudoVMV1R_V $v7
-    ; CHECK: $v0m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v2m2 = PseudoVMV2R_V $v8m2
-    ; CHECK: $v4m2 = PseudoVMV2R_V $v10m2
-    ; CHECK: $v2m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v4m2 = PseudoVMV2R_V $v8m2
-    ; CHECK: $v6m2 = PseudoVMV2R_V $v10m2
-    ; CHECK: $v14m2 = PseudoVMV2R_V $v10m2
-    ; CHECK: $v12m2 = PseudoVMV2R_V $v8m2
-    ; CHECK: $v10m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v12m2 = PseudoVMV2R_V $v6m2
-    ; CHECK: $v14m2 = PseudoVMV2R_V $v8m2
-    ; CHECK: $v16m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v6
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v7
+    ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v6
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v7
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v6
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v7
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v7
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v6
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v5
+    ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v6
+    ; CHECK-NEXT: $v11 = PseudoVMV1R_V $v7
+    ; CHECK-NEXT: $v0m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v8m2
+    ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v8m2
+    ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v14m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v8m2
+    ; CHECK-NEXT: $v10m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v6m2
+    ; CHECK-NEXT: $v14m2 = PseudoVMV2R_V $v8m2
+    ; CHECK-NEXT: $v16m2 = PseudoVMV2R_V $v10m2
     $v2_v3_v4 = COPY $v5_v6_v7
     $v3_v4_v5 = COPY $v5_v6_v7
     $v4_v5_v6 = COPY $v5_v6_v7
@@ -95,37 +95,37 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N4
     ; CHECK: $v6 = PseudoVMV1R_V $v10
-    ; CHECK: $v7 = PseudoVMV1R_V $v11
-    ; CHECK: $v8 = PseudoVMV1R_V $v12
-    ; CHECK: $v9 = PseudoVMV1R_V $v13
-    ; CHECK: $v7 = PseudoVMV1R_V $v10
-    ; CHECK: $v8 = PseudoVMV1R_V $v11
-    ; CHECK: $v9 = PseudoVMV1R_V $v12
-    ; CHECK: $v10 = PseudoVMV1R_V $v13
-    ; CHECK: $v16 = PseudoVMV1R_V $v13
-    ; CHECK: $v15 = PseudoVMV1R_V $v12
-    ; CHECK: $v14 = PseudoVMV1R_V $v11
-    ; CHECK: $v13 = PseudoVMV1R_V $v10
-    ; CHECK: $v14 = PseudoVMV1R_V $v10
-    ; CHECK: $v15 = PseudoVMV1R_V $v11
-    ; CHECK: $v16 = PseudoVMV1R_V $v12
-    ; CHECK: $v17 = PseudoVMV1R_V $v13
-    ; CHECK: $v2m2 = PseudoVMV2R_V $v10m2
-    ; CHECK: $v4m2 = PseudoVMV2R_V $v12m2
-    ; CHECK: $v6m2 = PseudoVMV2R_V $v14m2
-    ; CHECK: $v8m2 = PseudoVMV2R_V $v16m2
-    ; CHECK: $v4m2 = PseudoVMV2R_V $v10m2
-    ; CHECK: $v6m2 = PseudoVMV2R_V $v12m2
-    ; CHECK: $v8m2 = PseudoVMV2R_V $v14m2
-    ; CHECK: $v10m2 = PseudoVMV2R_V $v16m2
-    ; CHECK: $v22m2 = PseudoVMV2R_V $v16m2
-    ; CHECK: $v20m2 = PseudoVMV2R_V $v14m2
-    ; CHECK: $v18m2 = PseudoVMV2R_V $v12m2
-    ; CHECK: $v16m2 = PseudoVMV2R_V $v10m2
-    ; CHECK: $v18m2 = PseudoVMV2R_V $v10m2
-    ; CHECK: $v20m2 = PseudoVMV2R_V $v12m2
-    ; CHECK: $v22m2 = PseudoVMV2R_V $v14m2
-    ; CHECK: $v24m2 = PseudoVMV2R_V $v16m2
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v14 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v13 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v14 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v12m2
+    ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v14m2
+    ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v16m2
+    ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v12m2
+    ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v14m2
+    ; CHECK-NEXT: $v10m2 = PseudoVMV2R_V $v16m2
+    ; CHECK-NEXT: $v22m2 = PseudoVMV2R_V $v16m2
+    ; CHECK-NEXT: $v20m2 = PseudoVMV2R_V $v14m2
+    ; CHECK-NEXT: $v18m2 = PseudoVMV2R_V $v12m2
+    ; CHECK-NEXT: $v16m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v18m2 = PseudoVMV2R_V $v10m2
+    ; CHECK-NEXT: $v20m2 = PseudoVMV2R_V $v12m2
+    ; CHECK-NEXT: $v22m2 = PseudoVMV2R_V $v14m2
+    ; CHECK-NEXT: $v24m2 = PseudoVMV2R_V $v16m2
     $v6_v7_v8_v9 = COPY $v10_v11_v12_v13
     $v7_v8_v9_v10 = COPY $v10_v11_v12_v13
     $v13_v14_v15_v16 = COPY $v10_v11_v12_v13
@@ -142,25 +142,25 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N5
     ; CHECK: $v5 = PseudoVMV1R_V $v10
-    ; CHECK: $v6 = PseudoVMV1R_V $v11
-    ; CHECK: $v7 = PseudoVMV1R_V $v12
-    ; CHECK: $v8 = PseudoVMV1R_V $v13
-    ; CHECK: $v9 = PseudoVMV1R_V $v14
-    ; CHECK: $v6 = PseudoVMV1R_V $v10
-    ; CHECK: $v7 = PseudoVMV1R_V $v11
-    ; CHECK: $v8 = PseudoVMV1R_V $v12
-    ; CHECK: $v9 = PseudoVMV1R_V $v13
-    ; CHECK: $v10 = PseudoVMV1R_V $v14
-    ; CHECK: $v18 = PseudoVMV1R_V $v14
-    ; CHECK: $v17 = PseudoVMV1R_V $v13
-    ; CHECK: $v16 = PseudoVMV1R_V $v12
-    ; CHECK: $v15 = PseudoVMV1R_V $v11
-    ; CHECK: $v14 = PseudoVMV1R_V $v10
-    ; CHECK: $v15 = PseudoVMV1R_V $v10
-    ; CHECK: $v16 = PseudoVMV1R_V $v11
-    ; CHECK: $v17 = PseudoVMV1R_V $v12
-    ; CHECK: $v18 = PseudoVMV1R_V $v13
-    ; CHECK: $v19 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v14 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v14
     $v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14
     $v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14
     $v14_v15_v16_v17_v18 = COPY $v10_v11_v12_v13_v14
@@ -172,29 +172,29 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N6
     ; CHECK: $v4 = PseudoVMV1R_V $v10
-    ; CHECK: $v5 = PseudoVMV1R_V $v11
-    ; CHECK: $v6 = PseudoVMV1R_V $v12
-    ; CHECK: $v7 = PseudoVMV1R_V $v13
-    ; CHECK: $v8 = PseudoVMV1R_V $v14
-    ; CHECK: $v9 = PseudoVMV1R_V $v15
-    ; CHECK: $v5 = PseudoVMV1R_V $v10
-    ; CHECK: $v6 = PseudoVMV1R_V $v11
-    ; CHECK: $v7 = PseudoVMV1R_V $v12
-    ; CHECK: $v8 = PseudoVMV1R_V $v13
-    ; CHECK: $v9 = PseudoVMV1R_V $v14
-    ; CHECK: $v10 = PseudoVMV1R_V $v15
-    ; CHECK: $v20 = PseudoVMV1R_V $v15
-    ; CHECK: $v19 = PseudoVMV1R_V $v14
-    ; CHECK: $v18 = PseudoVMV1R_V $v13
-    ; CHECK: $v17 = PseudoVMV1R_V $v12
-    ; CHECK: $v16 = PseudoVMV1R_V $v11
-    ; CHECK: $v15 = PseudoVMV1R_V $v10
-    ; CHECK: $v16 = PseudoVMV1R_V $v10
-    ; CHECK: $v17 = PseudoVMV1R_V $v11
-    ; CHECK: $v18 = PseudoVMV1R_V $v12
-    ; CHECK: $v19 = PseudoVMV1R_V $v13
-    ; CHECK: $v20 = PseudoVMV1R_V $v14
-    ; CHECK: $v21 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v15
     $v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15
     $v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15
     $v15_v16_v17_v18_v19_v20 = COPY $v10_v11_v12_v13_v14_v15
@@ -206,33 +206,33 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N7
     ; CHECK: $v3 = PseudoVMV1R_V $v10
-    ; CHECK: $v4 = PseudoVMV1R_V $v11
-    ; CHECK: $v5 = PseudoVMV1R_V $v12
-    ; CHECK: $v6 = PseudoVMV1R_V $v13
-    ; CHECK: $v7 = PseudoVMV1R_V $v14
-    ; CHECK: $v8 = PseudoVMV1R_V $v15
-    ; CHECK: $v9 = PseudoVMV1R_V $v16
-    ; CHECK: $v4 = PseudoVMV1R_V $v10
-    ; CHECK: $v5 = PseudoVMV1R_V $v11
-    ; CHECK: $v6 = PseudoVMV1R_V $v12
-    ; CHECK: $v7 = PseudoVMV1R_V $v13
-    ; CHECK: $v8 = PseudoVMV1R_V $v14
-    ; CHECK: $v9 = PseudoVMV1R_V $v15
-    ; CHECK: $v10 = PseudoVMV1R_V $v16
-    ; CHECK: $v22 = PseudoVMV1R_V $v16
-    ; CHECK: $v21 = PseudoVMV1R_V $v15
-    ; CHECK: $v20 = PseudoVMV1R_V $v14
-    ; CHECK: $v19 = PseudoVMV1R_V $v13
-    ; CHECK: $v18 = PseudoVMV1R_V $v12
-    ; CHECK: $v17 = PseudoVMV1R_V $v11
-    ; CHECK: $v16 = PseudoVMV1R_V $v10
-    ; CHECK: $v17 = PseudoVMV1R_V $v10
-    ; CHECK: $v18 = PseudoVMV1R_V $v11
-    ; CHECK: $v19 = PseudoVMV1R_V $v12
-    ; CHECK: $v20 = PseudoVMV1R_V $v13
-    ; CHECK: $v21 = PseudoVMV1R_V $v14
-    ; CHECK: $v22 = PseudoVMV1R_V $v15
-    ; CHECK: $v23 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v23 = PseudoVMV1R_V $v16
     $v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16
     $v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16
     $v16_v17_v18_v19_v20_v21_v22 = COPY $v10_v11_v12_v13_v14_v15_v16
@@ -244,37 +244,37 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N8
     ; CHECK: $v2 = PseudoVMV1R_V $v10
-    ; CHECK: $v3 = PseudoVMV1R_V $v11
-    ; CHECK: $v4 = PseudoVMV1R_V $v12
-    ; CHECK: $v5 = PseudoVMV1R_V $v13
-    ; CHECK: $v6 = PseudoVMV1R_V $v14
-    ; CHECK: $v7 = PseudoVMV1R_V $v15
-    ; CHECK: $v8 = PseudoVMV1R_V $v16
-    ; CHECK: $v9 = PseudoVMV1R_V $v17
-    ; CHECK: $v3 = PseudoVMV1R_V $v10
-    ; CHECK: $v4 = PseudoVMV1R_V $v11
-    ; CHECK: $v5 = PseudoVMV1R_V $v12
-    ; CHECK: $v6 = PseudoVMV1R_V $v13
-    ; CHECK: $v7 = PseudoVMV1R_V $v14
-    ; CHECK: $v8 = PseudoVMV1R_V $v15
-    ; CHECK: $v9 = PseudoVMV1R_V $v16
-    ; CHECK: $v10 = PseudoVMV1R_V $v17
-    ; CHECK: $v24 = PseudoVMV1R_V $v17
-    ; CHECK: $v23 = PseudoVMV1R_V $v16
-    ; CHECK: $v22 = PseudoVMV1R_V $v15
-    ; CHECK: $v21 = PseudoVMV1R_V $v14
-    ; CHECK: $v20 = PseudoVMV1R_V $v13
-    ; CHECK: $v19 = PseudoVMV1R_V $v12
-    ; CHECK: $v18 = PseudoVMV1R_V $v11
-    ; CHECK: $v17 = PseudoVMV1R_V $v10
-    ; CHECK: $v18 = PseudoVMV1R_V $v10
-    ; CHECK: $v19 = PseudoVMV1R_V $v11
-    ; CHECK: $v20 = PseudoVMV1R_V $v12
-    ; CHECK: $v21 = PseudoVMV1R_V $v13
-    ; CHECK: $v22 = PseudoVMV1R_V $v14
-    ; CHECK: $v23 = PseudoVMV1R_V $v15
-    ; CHECK: $v24 = PseudoVMV1R_V $v16
-    ; CHECK: $v25 = PseudoVMV1R_V $v17
+    ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v17
+    ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v17
+    ; CHECK-NEXT: $v24 = PseudoVMV1R_V $v17
+    ; CHECK-NEXT: $v23 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v10
+    ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v11
+    ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v12
+    ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v13
+    ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v14
+    ; CHECK-NEXT: $v23 = PseudoVMV1R_V $v15
+    ; CHECK-NEXT: $v24 = PseudoVMV1R_V $v16
+    ; CHECK-NEXT: $v25 = PseudoVMV1R_V $v17
     $v2_v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
     $v3_v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
     $v17_v18_v19_v20_v21_v22_v23_v24 = COPY $v10_v11_v12_v13_v14_v15_v16_v17

diff  --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index cbde2565348eb..5dc2b76ceb979 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -20,25 +20,26 @@ body: |
 
     ; CHECK-LABEL: name: zvlsseg_spill
     ; CHECK: liveins: $x10, $x11
-    ; CHECK: $x2 = frame-setup ADDI $x2, -16
-    ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK: $x12 = frame-setup PseudoReadVLENB
-    ; CHECK: $x12 = frame-setup SLLI killed $x12, 3
-    ; CHECK: $x2 = frame-setup SUB $x2, killed $x12
-    ; CHECK: dead $x0 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 renamable $x10, $noreg, 6, implicit $vl, implicit $vtype
-    ; CHECK: $x11 = ADDI $x2, 16
-    ; CHECK: $x12 = PseudoReadVLENB
-    ; CHECK: dead renamable $x11 = PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, killed $x11, killed $x12
-    ; CHECK: $x11 = ADDI $x2, 16
-    ; CHECK: $x12 = PseudoReadVLENB
-    ; CHECK: dead renamable $v7_v8_v9_v10_v11_v12_v13, dead renamable $x11 = PseudoVRELOAD7_M1 killed $x11, killed $x12, implicit-def $v8
-    ; CHECK: VS1R_V killed $v8, killed renamable $x10
-    ; CHECK: $x10 = frame-destroy PseudoReadVLENB
-    ; CHECK: $x10 = frame-destroy SLLI killed $x10, 3
-    ; CHECK: $x2 = frame-destroy ADD $x2, killed $x10
-    ; CHECK: $x2 = frame-destroy ADDI $x2, 16
-    ; CHECK: PseudoRET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x2 = frame-setup ADDI $x2, -16
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+    ; CHECK-NEXT: $x12 = frame-setup PseudoReadVLENB
+    ; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 3
+    ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 renamable $x10, $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $x11 = ADDI $x2, 16
+    ; CHECK-NEXT: $x12 = PseudoReadVLENB
+    ; CHECK-NEXT: dead renamable $x11 = PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, killed $x11, killed $x12
+    ; CHECK-NEXT: $x11 = ADDI $x2, 16
+    ; CHECK-NEXT: $x12 = PseudoReadVLENB
+    ; CHECK-NEXT: dead renamable $v7_v8_v9_v10_v11_v12_v13, dead renamable $x11 = PseudoVRELOAD7_M1 killed $x11, killed $x12, implicit-def $v8
+    ; CHECK-NEXT: VS1R_V killed $v8, killed renamable $x10
+    ; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
+    ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3
+    ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+    ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
+    ; CHECK-NEXT: PseudoRET
     %0:gpr = COPY $x10
     %1:gprnox0 = COPY $x11
     $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 %0, %1, 6
