[llvm] d392520 - [RISCV] Convert vsetvli mir tests to use $noreg instead of implicit_def. NFC

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 30 23:26:02 PDT 2024


Author: Luke Lau
Date: 2024-05-01T14:25:11+08:00
New Revision: d392520c645b653cd9c2ce944958fb115c4ba506

URL: https://github.com/llvm/llvm-project/commit/d392520c645b653cd9c2ce944958fb115c4ba506
DIFF: https://github.com/llvm/llvm-project/commit/d392520c645b653cd9c2ce944958fb115c4ba506.diff

LOG: [RISCV] Convert vsetvli mir tests to use $noreg instead of implicit_def. NFC

This matches what isel has emitted since commit
a63bd7e99b00c6c970f38ea596f708e42b8c98e5. It also adds the undef flag to
more closely match the output after register allocation, which will help
reduce the test diffs in #70549

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 479a356473e12d..c66eb5717048dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -189,9 +189,8 @@ body:             |
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v8
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
   ; CHECK-NEXT:   PseudoBR %bb.1
@@ -199,15 +198,13 @@ body:             |
   ; CHECK-NEXT: bb.1.if.then:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   PseudoBR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.if.else:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %pt3:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 %pt3, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.if.end:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
@@ -221,20 +218,17 @@ body:             |
     %6:vr = COPY $v8
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
-    %pt:vr = IMPLICIT_DEF
-    %0:vr = PseudoVLE64_V_M1 %pt, %5, %7, 6, 0
+    %0:vr = PseudoVLE64_V_M1 undef $noreg, %5, %7, 6, 0
     %8:gpr = COPY $x0
     BEQ %4, %8, %bb.2
     PseudoBR %bb.1
 
   bb.1.if.then:
-    %pt2:vr = IMPLICIT_DEF
-    %1:vr = PseudoVADD_VV_M1 %pt2, %0, %6, %7, 6, 0
+    %1:vr = PseudoVADD_VV_M1 undef $noreg, %0, %6, %7, 6, 0
     PseudoBR %bb.3
 
   bb.2.if.else:
-    %pt3:vr = IMPLICIT_DEF
-    %2:vr = PseudoVSUB_VV_M1 %pt3, %0, %6, %7, 6, 0
+    %2:vr = PseudoVSUB_VV_M1 undef $noreg, %0, %6, %7, 6, 0
 
   bb.3.if.end:
     %3:vr = PHI %1, %bb.1, %2, %bb.2
@@ -274,9 +268,8 @@ body:             |
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x12
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY2]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 undef $noreg, [[COPY2]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
   ; CHECK-NEXT:   PseudoBR %bb.1
@@ -284,17 +277,15 @@ body:             |
   ; CHECK-NEXT: bb.1.if.then:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %dead1:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT:   early-clobber %1:vr = PseudoVZEXT_VF2_M1 %dead1, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   early-clobber %1:vr = PseudoVZEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   PseudoBR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.if.else:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %dead2:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT:   early-clobber %2:vr = PseudoVSEXT_VF2_M1 %dead2, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   early-clobber %2:vr = PseudoVSEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.if.end:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI %1, %bb.1, %2, %bb.2
@@ -308,20 +299,17 @@ body:             |
     %6:gpr = COPY $x12
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
-    %pt:vr = IMPLICIT_DEF
-    %0:vr = PseudoVLE32_V_MF2 %pt, %5, %7, 5, 0
+    %0:vr = PseudoVLE32_V_MF2 undef $noreg, %5, %7, 5, 0
     %8:gpr = COPY $x0
     BEQ %4, %8, %bb.2
     PseudoBR %bb.1
 
   bb.1.if.then:
-    %dead1:vr = IMPLICIT_DEF
-    early-clobber %1:vr = PseudoVZEXT_VF2_M1 %dead1, %0, %7, 6, 0
+    early-clobber %1:vr = PseudoVZEXT_VF2_M1 undef $noreg, %0, %7, 6, 0
     PseudoBR %bb.3
 
   bb.2.if.else:
-    %dead2:vr = IMPLICIT_DEF
-    early-clobber %2:vr = PseudoVSEXT_VF2_M1 %dead2, %0, %7, 6, 0
+    early-clobber %2:vr = PseudoVSEXT_VF2_M1 undef $noreg, %0, %7, 6, 0
 
   bb.3.if.end:
     %3:vr = PHI %1, %bb.1, %2, %bb.2
@@ -368,17 +356,15 @@ body:             |
   ; CHECK-NEXT: bb.1.if.then:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   PseudoBR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.if.else:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 %pt2, [[COPY1]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY1]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.if.end:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
@@ -398,13 +384,11 @@ body:             |
     PseudoBR %bb.1
 
   bb.1.if.then:
-    %pt:vr = IMPLICIT_DEF
-    %0:vr = PseudoVADD_VV_M1 %pt, %4, %5, %6, 6, 0
+    %0:vr = PseudoVADD_VV_M1 undef $noreg, %4, %5, %6, 6, 0
     PseudoBR %bb.3
 
   bb.2.if.else:
-    %pt2:vr = IMPLICIT_DEF
-    %1:vr = PseudoVSUB_VV_M1 %pt2, %5, %5, %6, 6, 0
+    %1:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %5, %6, 6, 0
 
   bb.3.if.end:
     %2:vr = PHI %0, %bb.1, %1, %bb.2
@@ -453,15 +437,13 @@ body:             |
   ; CHECK-NEXT: bb.1.if.then:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   PseudoBR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.if.else:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 %pt2, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.if.end:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2
@@ -481,13 +463,11 @@ body:             |
     PseudoBR %bb.1
 
   bb.1.if.then:
-    %pt:vr = IMPLICIT_DEF
-    %1:vr = PseudoVADD_VV_M1 %pt, %5, %6, %0, 6, 0
+    %1:vr = PseudoVADD_VV_M1 undef $noreg, %5, %6, %0, 6, 0
     PseudoBR %bb.3
 
   bb.2.if.else:
-    %pt2:vr = IMPLICIT_DEF
-    %2:vr = PseudoVSUB_VV_M1 %pt2, %5, %6, %0, 6, 0
+    %2:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %6, %0, 6, 0
 
   bb.3.if.end:
     %3:vr = PHI %1, %bb.1, %2, %bb.2
@@ -519,13 +499,11 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x11
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY undef $noreg
   ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 %pt, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-  ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 %pt2, 0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
@@ -536,8 +514,8 @@ body:             |
   ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
   ; CHECK-NEXT:   [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
-  ; CHECK-NEXT:   BEQ killed [[PseudoVCPOP_M_B1_]], [[COPY2]], %bb.3
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x0
+  ; CHECK-NEXT:   BEQ killed [[PseudoVCPOP_M_B1_]], [[COPY3]], %bb.3
   ; CHECK-NEXT:   PseudoBR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
@@ -546,10 +524,9 @@ body:             |
   ; CHECK-NEXT:   [[LWU:%[0-9]+]]:gpr = LWU [[COPY1]], 0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3:
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr = PHI [[DEF]], %bb.1, [[LWU]], %bb.2
-  ; CHECK-NEXT:   %pt3:vr = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.1, [[LWU]], %bb.2
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT:   [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 %pt3, [[PseudoVLE32_V_MF2_MASK]], [[PHI]], -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 undef $noreg, [[PseudoVLE32_V_MF2_MASK]], [[PHI]], -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   $v0 = COPY [[PseudoVADD_VX_MF2_]]
   ; CHECK-NEXT:   PseudoRET implicit $v0
   bb.0:
@@ -558,11 +535,9 @@ body:             |
 
     %0:gpr = COPY $x11
     %1:gpr = COPY $x10
-    %2:gpr = IMPLICIT_DEF
-    %pt:vr = IMPLICIT_DEF
-    %3:vr = PseudoVID_V_MF2 %pt, -1, 6, 0
-    %pt2:vr = IMPLICIT_DEF
-    %4:vrnov0 = PseudoVMV_V_I_MF2 %pt2, 0, -1, 5, 0
+    %2:gpr = COPY undef $noreg
+    %3:vr = PseudoVID_V_MF2 undef $noreg, -1, 6, 0
+    %4:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5, 0
 
   bb.1:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
@@ -582,8 +557,7 @@ body:             |
 
   bb.3:
     %10:gpr = PHI %2, %bb.1, %9, %bb.2
-    %pt3:vr = IMPLICIT_DEF
-    %11:vr = nsw PseudoVADD_VX_MF2 %pt3, %6, %10, -1, 5, 0
+    %11:vr = nsw PseudoVADD_VX_MF2 undef $noreg, %6, %10, -1, 5, 0
     $v0 = COPY %11
     PseudoRET implicit $v0
 ...
@@ -612,17 +586,15 @@ body:             |
   ; CHECK-NEXT:   [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
   ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 %pt, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, %10, %bb.1
-  ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 %pt2, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]]
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
   ; CHECK-NEXT:   PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
@@ -638,16 +610,14 @@ body:             |
     %1:gpr = PseudoReadVLENB
     %2:gpr = SRLI %1:gpr, 3
     %3:gpr = COPY $x11
-    %pt:vr = IMPLICIT_DEF
-    %4:vr = PseudoVID_V_M1 %pt, -1, 6, 0
+    %4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
     %5:gpr = COPY $x0
 
   bb.1:
     successors: %bb.1, %bb.2
 
     %6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.1
-    %pt2:vr = IMPLICIT_DEF
-    %7:vr = PseudoVADD_VX_M1 %pt2, %4:vr, %6:gpr, -1, 6, 0
+    %7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
     %8:gpr = MUL %6:gpr, %2:gpr
     %9:gpr = ADD %0:gpr, %8:gpr
     PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
@@ -684,17 +654,15 @@ body:             |
   ; CHECK-NEXT:   [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
   ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 %pt, -1, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, %10, %bb.2
-  ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 %pt2, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]]
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
   ; CHECK-NEXT:   PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
@@ -714,16 +682,14 @@ body:             |
     %1:gpr = PseudoReadVLENB
     %2:gpr = SRLI %1:gpr, 3
     %3:gpr = COPY $x11
-    %pt:vr = IMPLICIT_DEF
-    %4:vr = PseudoVID_V_M1 %pt, -1, 6, 3
+    %4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 3
     %5:gpr = COPY $x0
 
   bb.1:
     successors: %bb.3
 
     %6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.3
-    %pt2:vr = IMPLICIT_DEF
-    %7:vr = PseudoVADD_VX_M1 %pt2, %4:vr, %6:gpr, -1, 6, 0
+    %7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
     %8:gpr = MUL %6:gpr, %2:gpr
     %9:gpr = ADD %0:gpr, %8:gpr
     PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
@@ -782,9 +748,8 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   %dead:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 4, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 %dead, 0, 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVMV_V_I_M1_]]
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY [[COPY2]]
   ; CHECK-NEXT:   [[LUI:%[0-9]+]]:gpr = LUI 1
@@ -796,10 +761,8 @@ body:             |
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr = PHI [[COPY1]], %bb.0, %5, %bb.1
   ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:gpr = PHI [[ADDIW]], %bb.0, %4, %bb.1
   ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:vr = PHI [[COPY3]], %bb.0, %16, %bb.1
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, [[PHI]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
-  ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, killed [[PseudoVLE32_V_M1_]], [[PHI2]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 undef $noreg, [[PHI]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
+  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, killed [[PseudoVLE32_V_M1_]], [[PHI2]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gpr = nsw ADDI [[PHI1]], -4
   ; CHECK-NEXT:   [[ADDI1:%[0-9]+]]:gpr = ADDI [[PHI]], 16
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
@@ -808,10 +771,8 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.middle.block:
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gpr = COPY $x0
-  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVMV_S_X:%[0-9]+]]:vr = PseudoVMV_S_X [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
-  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVMV_S_X:%[0-9]+]]:vr = PseudoVMV_S_X undef $noreg, [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
   ; CHECK-NEXT:   PseudoRET
@@ -820,8 +781,7 @@ body:             |
 
     %8:gpr = COPY $x12
     %6:gpr = COPY $x10
-    %dead:vr = IMPLICIT_DEF
-    %11:vr = PseudoVMV_V_I_M1 %dead, 0, 4, 5, 0
+    %11:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5, 0
     %12:vr = COPY %11
     %10:vr = COPY %12
     %13:gpr = LUI 1
@@ -833,10 +793,8 @@ body:             |
     %0:gpr = PHI %6, %bb.0, %5, %bb.1
     %1:gpr = PHI %9, %bb.0, %4, %bb.1
     %2:vr = PHI %10, %bb.0, %16, %bb.1
-    %pt:vr = IMPLICIT_DEF
-    %14:vr = PseudoVLE32_V_M1 %pt, %0, 4, 5, 0 :: (load (s128) from %ir.lsr.iv12, align 4)
-    %pt2:vr = IMPLICIT_DEF
-    %16:vr = PseudoVADD_VV_M1 %pt2, killed %14, %2, 4, 5, 0
+    %14:vr = PseudoVLE32_V_M1 undef $noreg, %0, 4, 5, 0 :: (load (s128) from %ir.lsr.iv12, align 4)
+    %16:vr = PseudoVADD_VV_M1 undef $noreg, killed %14, %2, 4, 5, 0
     %4:gpr = nsw ADDI %1, -4
     %5:gpr = ADDI %0, 16
     %18:gpr = COPY $x0
@@ -845,10 +803,8 @@ body:             |
 
   bb.2.middle.block:
     %19:gpr = COPY $x0
-    %21:vr = IMPLICIT_DEF
-    %20:vr = PseudoVMV_S_X %21, %19, 1, 5
-    %24:vr = IMPLICIT_DEF
-    %23:vr = PseudoVREDSUM_VS_M1_E8 %24, %16, killed %20, 4, 5, 1
+    %20:vr = PseudoVMV_S_X undef $noreg, %19, 1, 5
+    %23:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, %16, killed %20, 4, 5, 1
     PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
     PseudoRET
 
@@ -952,20 +908,18 @@ body:             |
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x15
   ; CHECK-NEXT:   %vlenb:gpr = PseudoReadVLENB
   ; CHECK-NEXT:   %inc:gpr = SRLI killed %vlenb, 3
-  ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 %pt, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT:   PseudoBR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr = PHI [[COPY3]], %bb.0, %12, %bb.3
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr = PHI [[COPY3]], %bb.0, %11, %bb.3
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY2]], [[PHI]]
-  ; CHECK-NEXT:   %pta:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 %pta, [[PseudoVID_V_M1_]], killed [[ADD]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], killed [[ADD]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[PseudoVMSLTU_VX_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VX_M1 [[PseudoVADD_VX_M1_]], [[COPY1]], -1, 6 /* e64 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSLTU_VX_M1_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
@@ -976,11 +930,9 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr = ADD %src, [[PHI]]
-  ; CHECK-NEXT:   %pt2:vrnov0 = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed [[ADD1]], -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-  ; CHECK-NEXT:   %ptb:vr = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, killed [[ADD1]], -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT:   [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 %ptb, [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:gpr = ADD %dst, [[PHI]]
   ; CHECK-NEXT:   PseudoVSE8_V_MF8 killed [[PseudoVADD_VI_MF8_]], killed [[ADD2]], -1, 3 /* e8 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
@@ -1005,8 +957,7 @@ body:             |
     %12:gpr = COPY $x15
     %vlenb:gpr = PseudoReadVLENB
     %inc:gpr = SRLI killed %vlenb, 3
-    %pt:vr = IMPLICIT_DEF
-    %10:vr = PseudoVID_V_M1 %pt, -1, 6, 0
+    %10:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
     %59:gpr = COPY $x0
     PseudoBR %bb.1
 
@@ -1015,8 +966,7 @@ body:             |
 
     %26:gpr = PHI %59, %bb.0, %28, %bb.3
     %61:gpr = ADD %12, %26
-    %pta:vr = IMPLICIT_DEF
-    %27:vr = PseudoVADD_VX_M1 %pta, %10, killed %61, -1, 6, 0
+    %27:vr = PseudoVADD_VX_M1 undef $noreg, %10, killed %61, -1, 6, 0
     %62:vr = PseudoVMSLTU_VX_M1 %27, %11, -1, 6
     %63:gpr = PseudoVCPOP_M_B1 %62, -1, 0
     %64:gpr = COPY $x0
@@ -1027,10 +977,8 @@ body:             |
     successors: %bb.3(0x80000000)
 
     %66:gpr = ADD %src, %26
-    %pt2:vrnov0 = IMPLICIT_DEF
-    %67:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed %66, -1, 3, 0
-    %ptb:vr = IMPLICIT_DEF
-    %76:vrnov0 = PseudoVADD_VI_MF8 %ptb, %67, 4, -1, 3, 0
+    %67:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, killed %66, -1, 3, 0
+    %76:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, %67, 4, -1, 3, 0
     %77:gpr = ADD %dst, %26
     PseudoVSE8_V_MF8 killed %76, killed %77, -1, 3
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index e567897aa86897..41a68ef9903e87 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -121,16 +121,14 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
-    ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x10
     %1:vr = COPY $v9
     %0:vr = COPY $v8
-    %pt:vr = IMPLICIT_DEF
-    %3:vr = PseudoVADD_VV_M1 %pt, %0, %1, %2, 6, 0
+    %3:vr = PseudoVADD_VV_M1 undef $noreg, %0, %1, %2, 6, 0
     $v8 = COPY %3
     PseudoRET implicit $v8
 
@@ -162,20 +160,16 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x11
     %1:vr = COPY $v8
     %0:gpr = COPY $x10
-    %pt:vr = IMPLICIT_DEF
-    %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0
-    %pt2:vr = IMPLICIT_DEF
-    %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0
+    %3:vr = PseudoVLE64_V_M1 undef $noreg, %0, %2, 6, 0
+    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %3, %1, %2, 6, 0
     $v8 = COPY %4
     PseudoRET implicit $v8
 
@@ -204,19 +198,15 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY1]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: %dead:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 undef $noreg, [[COPY1]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY %3
     ; CHECK-NEXT: PseudoRET implicit $v8
     %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
-    %pt:vr = IMPLICIT_DEF
-    %2:vr = PseudoVLE32_V_MF2 %pt, %0, %1, 5, 0
-    %dead:vr = IMPLICIT_DEF
-    early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed %2, %1, 6, 0
+    %2:vr = PseudoVLE32_V_MF2 undef $noreg, %0, %1, 5, 0
+    early-clobber %3:vr = PseudoVZEXT_VF2_M1 undef $noreg, killed %2, %1, 6, 0
     $v8 = COPY %3
     PseudoRET implicit $v8
 
@@ -276,23 +266,17 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt2, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
-    ; CHECK-NEXT: %pt3:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt3, [[PseudoVLE64_V_M1_]], [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: PseudoVSE64_V_M1 [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
     ; CHECK-NEXT: PseudoRET
     %1:gpr = COPY $x11
     %0:gpr = COPY $x10
-    %pt:vr = IMPLICIT_DEF
-    %pt2:vr = IMPLICIT_DEF
-    %2:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x)
-    %3:vr = PseudoVLE64_V_M1 %pt2, %1, 2, 6, 0 :: (load (s128) from %ir.y)
-    %pt3:vr = IMPLICIT_DEF
-    %4:vr = PseudoVADD_VV_M1 %pt3, killed %2, killed %3, 2, 6, 0
+    %2:vr = PseudoVLE64_V_M1 undef $noreg, %0, 2, 6, 0 :: (load (s128) from %ir.x)
+    %3:vr = PseudoVLE64_V_M1 undef $noreg, %1, 2, 6, 0 :: (load (s128) from %ir.y)
+    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %2, killed %3, 2, 6, 0
     PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x)
     PseudoRET
 
@@ -321,23 +305,19 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
     ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 $noreg, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], [[PseudoVLE64_V_M1_]], [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, [[PseudoVLE64_V_M1_]], [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype
     ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:gpr = COPY $x10
-    %pt:vr = IMPLICIT_DEF
-    %1:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x)
-    %2:vr = PseudoVMV_V_I_M1 $noreg, 0, -1, 6, 0
-    %4:vr = IMPLICIT_DEF
-    %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 6, 1
+    %1:vr = PseudoVLE64_V_M1 undef $noreg, %0, 2, 6, 0 :: (load (s128) from %ir.x)
+    %2:vr = PseudoVMV_V_I_M1 undef $noreg, 0, -1, 6, 0
+    %3:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, killed %1, killed %2, 2, 6, 1
     %5:gpr = PseudoVMV_X_S killed %3, 6
     $x10 = COPY %5
     PseudoRET implicit $x10
@@ -371,16 +351,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
     ; CHECK-NEXT: dead [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x10
     %1:vr = COPY $v9
     %0:vr = COPY $v8
     %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
-    %pt:vr = IMPLICIT_DEF
-    %4:vr = PseudoVADD_VV_M1 %pt, %0, %1, killed %3, 6, 0
+    %4:vr = PseudoVADD_VV_M1 undef $noreg, %0, %1, killed %3, 6, 0
     $v8 = COPY %4
     PseudoRET implicit $v8
 
@@ -412,23 +390,19 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
-    ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x11
     %1:vr = COPY $v8
     %0:gpr = COPY $x10
-    %pt:vr = IMPLICIT_DEF
-    %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0
+    %3:vr = PseudoVLE64_V_M1 undef $noreg, %0, %2, 6, 0
     INLINEASM &"", 1 /* sideeffect attdialect */
-    %pt2:vr = IMPLICIT_DEF
-    %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0
+    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %3, %1, %2, 6, 0
     $v8 = COPY %4
     PseudoRET implicit $v8
 
@@ -443,15 +417,13 @@ body:             |
    ; CHECK-LABEL: name: vmv_v_i_different_lmuls
     ; CHECK: liveins: $x10, $v8, $x11
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 217 /* e64, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: dead [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 %pt, 4, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 undef $noreg, 4, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 198 /* e8, mf4, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
-    ; CHECK-NEXT: dead [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 $noreg, 0, 4, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 undef $noreg, 0, 4, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: PseudoRET
-    %pt:vrm2 = IMPLICIT_DEF
-    %0:vrm2 = PseudoVID_V_M2 %pt, 4, 6, 3
-    %4:vr = PseudoVMV_V_I_MF4 $noreg, 0, 4, 3, 0
+    %0:vrm2 = PseudoVID_V_M2 undef $noreg, 4, 6, 3
+    %4:vr = PseudoVMV_V_I_MF4 undef $noreg, 0, 4, 3, 0
     PseudoRET
 ...
 ---
@@ -467,14 +439,14 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cond:gpr = COPY $x10
   ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   BEQ %cond, $x0, %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 $noreg, 1, 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 1, 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.4(0x40000000), %bb.3(0x40000000)
@@ -488,23 +460,23 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.4:
   ; CHECK-NEXT:   $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   dead [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S $noreg, 5 /* e32 */, implicit $vtype
-  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_1:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   dead [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S undef $noreg, 5 /* e32 */, implicit $vtype
+  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_1:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   PseudoRET
   bb.0:
     liveins: $x10
     %cond:gpr = COPY $x10
-    %1:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5, 0
+    %1:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5, 0
     BEQ %cond, $x0, %bb.2
   bb.1:
-    %2:vr = PseudoVMV_V_I_M1 $noreg, 1, 2, 6, 0
+    %2:vr = PseudoVMV_V_I_M1 undef $noreg, 1, 2, 6, 0
   bb.2: ; the exit info here should have sew/lmul ratio only
     BEQ %cond, $x0, %bb.4
   bb.3:
     PseudoCALL $noreg, csr_ilp32_lp64
   bb.4: ; this block will have PRE attempted on it
-    %4:gpr = PseudoVMV_X_S $noreg, 5
-    %5:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5, 0
+    %4:gpr = PseudoVMV_X_S undef $noreg, 5
+    %5:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5, 0
     PseudoRET
 ...
 ---
@@ -520,12 +492,12 @@ body:             |
     ; CHECK-NEXT: dead [[COPY:%[0-9]+]]:gpr = COPY $vtype
     ; CHECK-NEXT: $vl = COPY $x1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: dead [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, undef $noreg, undef $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: PseudoRET
     dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
     %1:gpr = COPY $vtype
     $vl = COPY $x1
     dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
-    %4:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6, 0
+    %4:vr = PseudoVADD_VV_M1 undef $noreg, undef $noreg, undef $noreg, 3, 6, 0
     PseudoRET
 ...


        


More information about the llvm-commits mailing list