[llvm] 3b8c0b3 - [RISCV] Add new pass to transform undef to pseudo for vector values.

Piyou Chen via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 22 04:33:44 PST 2023


Author: Piyou Chen
Date: 2023-02-22T04:03:22-08:00
New Revision: 3b8c0b342e16e9df74ed26b96b18b0a623896004

URL: https://github.com/llvm/llvm-project/commit/3b8c0b342e16e9df74ed26b96b18b0a623896004
DIFF: https://github.com/llvm/llvm-project/commit/3b8c0b342e16e9df74ed26b96b18b0a623896004.diff

LOG: [RISCV] Add new pass to transform undef to pseudo for vector values.

Certain RISC-V vector instructions have a register overlap constraint between
their source and destination operands, and violating it causes an illegal
instruction trap. We model the constraint with early-clobber, but that does not
stop the register allocator from assigning the same or an overlapping register
when the input register is an undef value. Converting the IMPLICIT_DEF into a
temporary pseudo prevents that. This is not the ideal fix; ideally we would
model the constraint precisely, but until then this approach prevents the
problem.

See also: https://github.com/llvm/llvm-project/issues/50157

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D129735
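
For illustration, the rewrite looks roughly like this at the MIR level. This is
a hand-written sketch, not taken verbatim from the tests below; the operand
list follows the PseudoVRGATHER_VI_M4 form used in
subregister-undef-early-clobber.mir, and the virtual register numbers are made
up.

  ; Before: the gather source is entirely undef, so the allocator may assign
  ; %0 and %1 the same or overlapping registers despite the early-clobber.
  %0:vrm4 = IMPLICIT_DEF
  early-clobber %1:vrm4 = PseudoVRGATHER_VI_M4 %0, 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype

  ; After the RISCV init undef pass: the IMPLICIT_DEF is replaced by a
  ; zero-size pseudo that defines %0, so the value is no longer known-undef
  ; and the early-clobber constraint keeps %0 and %1 from overlapping. The
  ; pseudo is never emitted.
  %0:vrm4 = PseudoRVVInitUndefM4
  early-clobber %1:vrm4 = PseudoVRGATHER_VI_M4 %0, 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype

The subregister-liveness case, where only some lanes of the input are undef, is
handled with INSERT_SUBREG instead, as shown in the .mir test below.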

Added: 
    llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp

Modified: 
    llvm/lib/Target/RISCV/CMakeLists.txt
    llvm/lib/Target/RISCV/RISCV.h
    llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
    llvm/test/CodeGen/RISCV/O3-pipeline.ll
    llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
    llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir
    llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
    llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index f909e0017e2b..9eb1a3b23939 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -36,6 +36,7 @@ add_llvm_target(RISCVCodeGen
   RISCVMergeBaseOffset.cpp
   RISCVRedundantCopyElimination.cpp
   RISCVRegisterInfo.cpp
+  RISCVRVVInitUndef.cpp
   RISCVSExtWRemoval.cpp
   RISCVStripWSuffix.cpp
   RISCVSubtarget.cpp

diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index c42fb070aade..88581e0024da 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -71,6 +71,10 @@ void initializeRISCVInsertVSETVLIPass(PassRegistry &);
 FunctionPass *createRISCVRedundantCopyEliminationPass();
 void initializeRISCVRedundantCopyEliminationPass(PassRegistry &);
 
+FunctionPass *createRISCVInitUndefPass();
+void initializeRISCVInitUndefPass(PassRegistry &);
+extern char &RISCVInitUndefID;
+
 InstructionSelector *createRISCVInstructionSelector(const RISCVTargetMachine &,
                                                     RISCVSubtarget &,
                                                     RISCVRegisterBankInfo &);

diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index a4b999e6aa3b..30a26e404cbb 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -113,6 +113,11 @@ void RISCVAsmPrinter::emitInstruction(const MachineInstr *MI) {
   case RISCV::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
     LowerHWASAN_CHECK_MEMACCESS(*MI);
     return;
+  case RISCV::PseudoRVVInitUndefM1:
+  case RISCV::PseudoRVVInitUndefM2:
+  case RISCV::PseudoRVVInitUndefM4:
+  case RISCV::PseudoRVVInitUndefM8:
+    return;
   }
 
   MCInst TmpInst;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index fe8366a11fc4..52d8a0e30b4a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1870,6 +1870,14 @@ def : Pat<(binop_allwusers<add> GPR:$rs1, (AddiPair:$rs2)),
                  (AddiPairImmSmall AddiPair:$rs2))>;
 }
 
+/// Empty pseudo for RISCVInitUndefPass
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 0, isCodeGenOnly = 1 in {
+  def PseudoRVVInitUndefM1     :  Pseudo<(outs VR:$vd), (ins), [], "">;
+  def PseudoRVVInitUndefM2     :  Pseudo<(outs VRM2:$vd), (ins), [], "">;
+  def PseudoRVVInitUndefM4     :  Pseudo<(outs VRM4:$vd), (ins), [], "">;
+  def PseudoRVVInitUndefM8     :  Pseudo<(outs VRM8:$vd), (ins), [], "">;
+}
+
 //===----------------------------------------------------------------------===//
 // Standard extensions
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
new file mode 100644
index 000000000000..a1de27f0f991
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
@@ -0,0 +1,274 @@
+//===- RISCVRVVInitUndef.cpp - Initialize undef vector value to pseudo ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a function pass that initializes undef vector values
+// with temporary pseudo instructions, which are never actually emitted, to
+// keep register allocation from producing an allocation that violates the
+// register constraints of vector instructions.
+//
+// Certain RISC-V vector instructions have a register overlap constraint
+// between their source and destination operands, and violating it causes an
+// illegal instruction trap. We model the constraint with early-clobber, but
+// that does not stop the register allocator from assigning the same or an
+// overlapping register when the input register is an undef value. Converting
+// the IMPLICIT_DEF into a temporary pseudo instruction and removing it later
+// prevents that. This is not the ideal fix: it may change the instruction
+// order or increase register pressure. Ideally we would model the constraint
+// precisely, but until then this is the way to prevent the problem.
+//
+// When subregister liveness is enabled, the same issue arises when only part
+// of a register is undef. Pseudo-initializing the whole register would create
+// redundant COPY instructions, so the pass instead emits INSERT_SUBREG for the
+// undefined lanes to make sure the whole register is defined before an
+// operation with an early-clobber constraint.
+//
+//
+// See also: https://github.com/llvm/llvm-project/issues/50157
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/DetectDeadLanes.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-init-undef"
+#define RISCV_INIT_UNDEF_NAME "RISCV init undef pass"
+
+namespace {
+
+class RISCVInitUndef : public MachineFunctionPass {
+  const TargetInstrInfo *TII;
+  MachineRegisterInfo *MRI;
+  const RISCVSubtarget *ST;
+  const TargetRegisterInfo *TRI;
+
+public:
+  static char ID;
+
+  RISCVInitUndef() : MachineFunctionPass(ID) {
+    initializeRISCVInitUndefPass(*PassRegistry::getPassRegistry());
+  }
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override { return RISCV_INIT_UNDEF_NAME; }
+
+private:
+  bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
+                         const DeadLaneDetector &DLD);
+  bool handleImplicitDef(MachineBasicBlock &MBB,
+                         MachineBasicBlock::iterator &Inst);
+  bool isVectorRegClass(const Register R);
+  const TargetRegisterClass *
+  getVRLargestSuperClass(const TargetRegisterClass *RC) const;
+  bool handleSubReg(MachineFunction &MF, MachineInstr &MI,
+                    const DeadLaneDetector &DLD);
+};
+
+} // end anonymous namespace
+
+char RISCVInitUndef::ID = 0;
+INITIALIZE_PASS(RISCVInitUndef, DEBUG_TYPE, RISCV_INIT_UNDEF_NAME, false, false)
+char &llvm::RISCVInitUndefID = RISCVInitUndef::ID;
+
+const TargetRegisterClass *
+RISCVInitUndef::getVRLargestSuperClass(const TargetRegisterClass *RC) const {
+  if (RISCV::VRM8RegClass.hasSubClassEq(RC))
+    return &RISCV::VRM8RegClass;
+  if (RISCV::VRM4RegClass.hasSubClassEq(RC))
+    return &RISCV::VRM4RegClass;
+  if (RISCV::VRM2RegClass.hasSubClassEq(RC))
+    return &RISCV::VRM2RegClass;
+  if (RISCV::VRRegClass.hasSubClassEq(RC))
+    return &RISCV::VRRegClass;
+  return RC;
+}
+
+bool RISCVInitUndef::isVectorRegClass(const Register R) {
+  const TargetRegisterClass *RC = MRI->getRegClass(R);
+  return RISCV::VRRegClass.hasSubClassEq(RC) ||
+         RISCV::VRM2RegClass.hasSubClassEq(RC) ||
+         RISCV::VRM4RegClass.hasSubClassEq(RC) ||
+         RISCV::VRM8RegClass.hasSubClassEq(RC);
+}
+
+static unsigned getUndefInitOpcode(unsigned RegClassID) {
+  switch (RegClassID) {
+  case RISCV::VRRegClassID:
+    return RISCV::PseudoRVVInitUndefM1;
+  case RISCV::VRM2RegClassID:
+    return RISCV::PseudoRVVInitUndefM2;
+  case RISCV::VRM4RegClassID:
+    return RISCV::PseudoRVVInitUndefM4;
+  case RISCV::VRM8RegClassID:
+    return RISCV::PseudoRVVInitUndefM8;
+  default:
+    llvm_unreachable("Unexpected register class.");
+  }
+}
+
+bool RISCVInitUndef::handleImplicitDef(MachineBasicBlock &MBB,
+                                       MachineBasicBlock::iterator &Inst) {
+  const TargetRegisterInfo &TRI =
+      *MBB.getParent()->getSubtarget().getRegisterInfo();
+
+  assert(Inst->getOpcode() == TargetOpcode::IMPLICIT_DEF);
+
+  Register Reg = Inst->getOperand(0).getReg();
+  if (!Reg.isVirtual())
+    return false;
+
+  bool NeedPseudoInit = false;
+  SmallVector<MachineOperand *, 1> UseMOs;
+  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
+    MachineInstr *UserMI = MO.getParent();
+
+    bool HasEarlyClobber = false;
+    bool TiedToDef = false;
+    for (MachineOperand &UserMO : UserMI->operands()) {
+      if (!UserMO.isReg())
+        continue;
+      if (UserMO.isEarlyClobber())
+        HasEarlyClobber = true;
+      if (UserMO.isUse() && UserMO.isTied() &&
+          TRI.regsOverlap(UserMO.getReg(), Reg))
+        TiedToDef = true;
+    }
+    if (HasEarlyClobber && !TiedToDef) {
+      NeedPseudoInit = true;
+      UseMOs.push_back(&MO);
+    }
+  }
+
+  if (!NeedPseudoInit)
+    return false;
+
+  LLVM_DEBUG(
+      dbgs() << "Emitting PseudoRVVInitUndef for implicit vector register "
+             << Reg << '\n');
+
+  unsigned RegClassID = getVRLargestSuperClass(MRI->getRegClass(Reg))->getID();
+  unsigned Opcode = getUndefInitOpcode(RegClassID);
+
+  BuildMI(MBB, Inst, Inst->getDebugLoc(), TII->get(Opcode), Reg);
+
+  Inst = MBB.erase(Inst);
+
+  for (auto MO : UseMOs)
+    MO->setIsUndef(false);
+
+  return true;
+}
+
+static bool isEarlyClobberMI(MachineInstr &MI) {
+  return llvm::any_of(MI.defs(), [](const MachineOperand &DefMO) {
+    return DefMO.isReg() && DefMO.isEarlyClobber();
+  });
+}
+
+bool RISCVInitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
+                                  const DeadLaneDetector &DLD) {
+  bool Changed = false;
+
+  for (MachineOperand &UseMO : MI.uses()) {
+    if (!UseMO.isReg())
+      continue;
+    if (!UseMO.getReg().isVirtual())
+      continue;
+
+    Register Reg = UseMO.getReg();
+    DeadLaneDetector::VRegInfo Info =
+        DLD.getVRegInfo(Register::virtReg2Index(Reg));
+
+    if (Info.UsedLanes == Info.DefinedLanes)
+      continue;
+
+    const TargetRegisterClass *TargetRegClass =
+        getVRLargestSuperClass(MRI->getRegClass(Reg));
+
+    LaneBitmask NeedDef = Info.UsedLanes & ~Info.DefinedLanes;
+
+    LLVM_DEBUG({
+      dbgs() << "Instruction has undef subregister.\n";
+      dbgs() << printReg(Reg, nullptr)
+             << " Used: " << PrintLaneMask(Info.UsedLanes)
+             << " Def: " << PrintLaneMask(Info.DefinedLanes)
+             << " Need Def: " << PrintLaneMask(NeedDef) << "\n";
+    });
+
+    SmallVector<unsigned> SubRegIndexNeedInsert;
+    TRI->getCoveringSubRegIndexes(*MRI, TargetRegClass, NeedDef,
+                                  SubRegIndexNeedInsert);
+
+    Register LatestReg = Reg;
+    for (auto ind : SubRegIndexNeedInsert) {
+      Changed = true;
+      const TargetRegisterClass *SubRegClass =
+          getVRLargestSuperClass(TRI->getSubRegisterClass(TargetRegClass, ind));
+      Register TmpInitSubReg = MRI->createVirtualRegister(SubRegClass);
+      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(),
+              TII->get(getUndefInitOpcode(SubRegClass->getID())),
+              TmpInitSubReg);
+      Register NewReg = MRI->createVirtualRegister(TargetRegClass);
+      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(),
+              TII->get(TargetOpcode::INSERT_SUBREG), NewReg)
+          .addReg(LatestReg)
+          .addReg(TmpInitSubReg)
+          .addImm(ind);
+      LatestReg = NewReg;
+    }
+
+    UseMO.setReg(LatestReg);
+  }
+
+  return Changed;
+}
+
+bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
+                                       MachineBasicBlock &MBB,
+                                       const DeadLaneDetector &DLD) {
+  bool Changed = false;
+  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
+    MachineInstr &MI = *I;
+    if (ST->enableSubRegLiveness() && isEarlyClobberMI(MI))
+      Changed |= handleSubReg(MF, MI, DLD);
+    if (MI.isImplicitDef()) {
+      auto DstReg = MI.getOperand(0).getReg();
+      if (isVectorRegClass(DstReg))
+        Changed |= handleImplicitDef(MBB, I);
+    }
+  }
+  return Changed;
+}
+
+bool RISCVInitUndef::runOnMachineFunction(MachineFunction &MF) {
+  ST = &MF.getSubtarget<RISCVSubtarget>();
+  if (!ST->hasVInstructions())
+    return false;
+
+  MRI = &MF.getRegInfo();
+  TII = ST->getInstrInfo();
+  TRI = MRI->getTargetRegisterInfo();
+
+  bool Changed = false;
+  DeadLaneDetector DLD(MRI, TRI);
+  DLD.computeSubRegisterLaneBitInfo();
+
+  for (MachineBasicBlock &BB : MF)
+    Changed |= processBasicBlock(MF, BB, DLD);
+
+  return Changed;
+}
+
+FunctionPass *llvm::createRISCVInitUndefPass() { return new RISCVInitUndef(); }

diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 876caa2e339f..4b510df2bc5f 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -81,6 +81,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   initializeRISCVExpandPseudoPass(*PR);
   initializeRISCVInsertVSETVLIPass(*PR);
   initializeRISCVDAGToDAGISelPass(*PR);
+  initializeRISCVInitUndefPass(*PR);
 }
 
 static StringRef computeDataLayout(const Triple &TT) {
@@ -260,6 +261,7 @@ class RISCVPassConfig : public TargetPassConfig {
   void addMachineSSAOptimization() override;
   void addPreRegAlloc() override;
   void addPostRegAlloc() override;
+  void addOptimizedRegAlloc() override;
 };
 } // namespace
 
@@ -355,6 +357,13 @@ void RISCVPassConfig::addPreRegAlloc() {
   addPass(createRISCVInsertVSETVLIPass());
 }
 
+void RISCVPassConfig::addOptimizedRegAlloc() {
+  if (getOptimizeRegAlloc())
+    insertPass(&DetectDeadLanesID, &RISCVInitUndefID);
+
+  TargetPassConfig::addOptimizedRegAlloc();
+}
+
 void RISCVPassConfig::addPostRegAlloc() {
   if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
     addPass(createRISCVRedundantCopyEliminationPass());

diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index 986f608eeecc..3cc231bd1fcf 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -109,6 +109,7 @@
 ; CHECK-NEXT:       RISCV Merge Base Offset
 ; CHECK-NEXT:       RISCV Insert VSETVLI pass
 ; CHECK-NEXT:       Detect Dead Lanes
+; CHECK-NEXT:       RISCV init undef pass
 ; CHECK-NEXT:       Process Implicit Definitions
 ; CHECK-NEXT:       Remove unreachable machine basic blocks
 ; CHECK-NEXT:       Live Variable Analysis

diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 66a887ae73cf..729056b33752 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -26,26 +26,26 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 24 * vlenb
 ; CHECK-NEXT:    li a0, 55
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vloxseg2ei32.v v8, (a0), v8
+; CHECK-NEXT:    vloxseg2ei32.v v16, (a0), v8
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 2
-; CHECK-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs4r.v v16, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vs4r.v v12, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs4r.v v20, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    li s0, 36
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT:    vfwadd.vv v16, v8, v8, v0.t
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    call func@plt
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -103,23 +103,23 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb
 ; SUBREGLIVENESS-NEXT:    li a0, 55
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vloxseg2ei32.v v8, (a0), v8
+; SUBREGLIVENESS-NEXT:    vloxseg2ei32.v v16, (a0), v8
 ; SUBREGLIVENESS-NEXT:    csrr a0, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a0, a0, 3
 ; SUBREGLIVENESS-NEXT:    add a0, sp, a0
 ; SUBREGLIVENESS-NEXT:    addi a0, a0, 16
 ; SUBREGLIVENESS-NEXT:    csrr a1, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a1, a1, 2
-; SUBREGLIVENESS-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT:    vs4r.v v16, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    add a0, a0, a1
-; SUBREGLIVENESS-NEXT:    vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT:    vs4r.v v20, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; SUBREGLIVENESS-NEXT:    vmclr.m v0
 ; SUBREGLIVENESS-NEXT:    li s0, 36
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vfwadd.vv v8, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v8, v0.t
 ; SUBREGLIVENESS-NEXT:    addi a0, sp, 16
-; SUBREGLIVENESS-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    call func@plt
 ; SUBREGLIVENESS-NEXT:    li a0, 32
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir
index 925d8027f10e..7c97475e72e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc %s -mtriple=riscv64 -mattr=+v -riscv-enable-subreg-liveness -run-pass=none -o - | FileCheck %s
+# RUN: llc %s -mtriple=riscv64 -mattr=+v -riscv-enable-subreg-liveness -run-pass=riscv-init-undef -o - | FileCheck %s
 
 ...
 ---
@@ -12,7 +12,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1
+    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M4 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -42,7 +46,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0
+    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M4 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -72,7 +80,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3
+    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M4 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -102,7 +114,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2
+    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M4 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -132,7 +148,9 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
+    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M4 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -162,7 +180,9 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
+    ; CHECK-NEXT: early-clobber %4:vrm4 = PseudoVRGATHER_VI_M4 killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M4 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -193,7 +213,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -223,7 +249,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -253,7 +285,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -283,7 +321,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -313,7 +357,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_4
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_3
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_5
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -343,7 +393,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_5
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_3
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_4
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -373,7 +429,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_6
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_2
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_7
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -403,7 +465,13 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_7
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_2
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_6
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -433,7 +501,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -463,7 +535,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -493,7 +569,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_3
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -523,7 +603,11 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM2_:%[0-9]+]]:vrm2 = PseudoRVVInitUndefM2
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_2
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -553,7 +637,9 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
@@ -583,7 +669,9 @@ body:             |
     ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 killed [[ADDI]], 0, 5 /* e32 */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM4_:%[0-9]+]]:vrm4 = PseudoRVVInitUndefM4
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
+    ; CHECK-NEXT: early-clobber %4:vrm8 = PseudoVRGATHER_VI_M8 killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
     ; CHECK-NEXT: PseudoVSE32_V_M8 killed %4, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0

diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
index 2c63bba20291..4882add84b5b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -7,9 +7,9 @@ define dso_local signext i32 @undef_early_clobber_chain() {
 ; CHECK-NEXT:    addi sp, sp, -400
 ; CHECK-NEXT:    .cfi_def_cfa_offset 400
 ; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, ma
-; CHECK-NEXT:    vrgather.vi v8, v8, 0
+; CHECK-NEXT:    vrgather.vi v9, v8, 0
 ; CHECK-NEXT:    mv a0, sp
-; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    addi sp, sp, 400
 ; CHECK-NEXT:    ret
@@ -51,10 +51,10 @@ define internal void @SubRegLivenessUndefInPhi(i64 %cond) {
 ; CHECK-NEXT:  .LBB1_3: # %UseSR
 ; CHECK-NEXT:    vl1r.v v14, (zero)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v15, v14, v8
+; CHECK-NEXT:    vrgatherei16.vv v13, v14, v8
 ; CHECK-NEXT:    vrgatherei16.vv v8, v14, v10
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vand.vv v8, v15, v8
+; CHECK-NEXT:    vand.vv v8, v13, v8
 ; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
 ; CHECK-NEXT:    vrgatherei16.vv v9, v14, v12
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -106,16 +106,16 @@ define internal void @SubRegLivenessUndef() {
 ; CHECK-NEXT:    vadd.vi v12, v8, 3
 ; CHECK-NEXT:  .LBB2_1: # %loopIR3.i.i
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl1r.v v9, (zero)
+; CHECK-NEXT:    vl1r.v v14, (zero)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v11, v9, v8
-; CHECK-NEXT:    vrgatherei16.vv v13, v9, v10
+; CHECK-NEXT:    vrgatherei16.vv v13, v14, v8
+; CHECK-NEXT:    vrgatherei16.vv v9, v14, v10
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vand.vv v11, v11, v13
+; CHECK-NEXT:    vand.vv v9, v13, v9
 ; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v13, v9, v12
+; CHECK-NEXT:    vrgatherei16.vv v11, v14, v12
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vand.vv v9, v11, v13
+; CHECK-NEXT:    vand.vv v9, v9, v11
 ; CHECK-NEXT:    vs1r.v v9, (zero)
 ; CHECK-NEXT:    j .LBB2_1
 loopIR.preheader.i.i:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
index f9be88ac4e20..09d926787d52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -44,10 +44,10 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
 ; SUBREG-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SUBREG-NEXT:    vl1r.v v9, (zero)
 ; SUBREG-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
-; SUBREG-NEXT:    vmv1r.v v11, v12
-; SUBREG-NEXT:    vrgatherei16.vv v11, v9, v10
+; SUBREG-NEXT:    vmv1r.v v13, v12
+; SUBREG-NEXT:    vrgatherei16.vv v13, v9, v10
 ; SUBREG-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; SUBREG-NEXT:    vand.vv v9, v8, v11
+; SUBREG-NEXT:    vand.vv v9, v8, v13
 ; SUBREG-NEXT:    vs1r.v v9, (zero)
 ; SUBREG-NEXT:    j .LBB0_1
 loopIR.preheader.i.i:


        

