[llvm] [RISCV] Make InitUndef handle undef operand (PR #65755)

Piyou Chen via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 11 05:08:55 PDT 2023


https://github.com/BeMg updated https://github.com/llvm/llvm-project/pull/65755:

From 2d72c05bd3f37f74d19bfc75af3b31b10aea1ae3 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Fri, 8 Sep 2023 01:34:08 -0700
Subject: [PATCH 1/3] [RISCV][NFC] precommit for 65704

---
 .../RISCV/65704-illegal-instruction.ll        | 64 +++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll

diff --git a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
new file mode 100644
index 000000000000000..a72b8174d7a36ed
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+m,+zfh,+zvfh \
+; RUN:  < %s | FileCheck %s
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8>, i64 immarg) #3
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8>, <16 x i8>, i64 immarg) #3
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(none)
+declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, i64, i64 immarg)
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32>, <4 x i32>, i64 immarg) #3
+
+define void @foo(<vscale x 8 x i8> %0) #2 {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vsetivli zero, 0, e8, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v10, v9, 0
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s s0, v10
+; CHECK-NEXT:    vsetivli zero, 0, e8, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v8, 0
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s s1, v8
+; CHECK-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    mv a0, s0
+; CHECK-NEXT:    mv a2, s1
+; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    li a5, 0
+; CHECK-NEXT:    jalr a1
+; CHECK-NEXT:    j .LBB0_1
+  %2 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> undef, i64 0)
+  %3 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> poison, i64 0)
+  br label %4
+
+4:                                                ; preds = %4, %1
+  %5 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %2, i64 0, i64 0, i64 0)
+  %6 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %5, i64 0)
+  %7 = bitcast <16 x i8> %6 to <2 x i64>
+  %8 = extractelement <2 x i64> %7, i64 0
+  %9 = insertvalue [2 x i64] zeroinitializer, i64 %8, 0
+  %10 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %0, <vscale x 8 x i8> %3, i64 0, i64 0, i64 0)
+  %11 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %10, i64 0)
+  %12 = bitcast <16 x i8> %11 to <2 x i64>
+  %13 = extractelement <2 x i64> %12, i64 0
+  %14 = insertvalue [2 x i64] zeroinitializer, i64 %13, 0
+  %15 = tail call fastcc [2 x i64] null([2 x i64] %9, [2 x i64] %14, [2 x i64] zeroinitializer)
+  br label %4
+}

From bacdb0ff68e0758399061455ea87d805bbb9ac4d Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Thu, 7 Sep 2023 23:20:50 -0700
Subject: [PATCH 2/3] [RISCV] InitUndef also handle undef

Bug report from https://github.com/llvm/llvm-project/issues/65704.

The InitUndef pass misses the pattern where the operand is not defined by an IMPLICIT_DEF but is marked undef directly.

This patch adds support for this pattern to the InitUndef pass.
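
In MachineIR terms, the missed case is a use operand on an early-clobber instruction that carries the undef flag itself, with no IMPLICIT_DEF feeding it. A rough C++ sketch of the extra handling (simplified; it assumes the pass's existing MRI/TII members and the isVectorRegClass/getVRLargestSuperClass/getUndefInitOpcode helpers are in scope — the real code is the fixupUndefOperandOnly hook in the diff below):

    // Sketch only: the pre-existing path keyed off IMPLICIT_DEF definitions;
    // this additionally fires when the use operand itself is flagged undef.
    for (MachineOperand &UseMO : MI.uses()) {
      if (!UseMO.isReg() || UseMO.isTied() || !UseMO.isUndef())
        continue;
      if (!isVectorRegClass(UseMO.getReg()))
        continue;
      // Materialize a PseudoRVVInitUndef of the matching register class and
      // rewrite the operand to use it, clearing the undef flag.
      const TargetRegisterClass *RC =
          getVRLargestSuperClass(MRI->getRegClass(UseMO.getReg()));
      Register NewReg = MRI->createVirtualRegister(RC);
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(getUndefInitOpcode(RC->getID())), NewReg);
      UseMO.setReg(NewReg);
      UseMO.setIsUndef(false);
    }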
---
 llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp   | 27 +++++++++++++++++++
 .../RISCV/65704-illegal-instruction.ll        |  2 +-
 ...regalloc-last-chance-recoloring-failure.ll |  4 +--
 3 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
index 7c6a89b6036fa3c..706758a2200115b 100644
--- a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
@@ -84,6 +84,7 @@ class RISCVInitUndef : public MachineFunctionPass {
   getVRLargestSuperClass(const TargetRegisterClass *RC) const;
   bool handleSubReg(MachineFunction &MF, MachineInstr &MI,
                     const DeadLaneDetector &DLD);
+  bool fixupUndefOperandOnly(MachineInstr *MI);
 };
 
 } // end anonymous namespace
@@ -245,6 +246,30 @@ bool RISCVInitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
   return Changed;
 }
 
+bool RISCVInitUndef::fixupUndefOperandOnly(MachineInstr *MI) {
+  bool Changed = false;
+  for (auto &UseMO : MI->uses()) {
+    if (!UseMO.isReg())
+      continue;
+    if (UseMO.isTied())
+      continue;
+    if (!UseMO.isUndef())
+      continue;
+    if (!isVectorRegClass(UseMO.getReg()))
+      continue;
+    const TargetRegisterClass *TargetRegClass =
+        getVRLargestSuperClass(MRI->getRegClass(UseMO.getReg()));
+    unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
+    Register NewReg = MRI->createVirtualRegister(TargetRegClass);
+    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(Opcode), NewReg);
+    UseMO.setReg(NewReg);
+    UseMO.setIsUndef(false);
+    Changed = true;
+  }
+
+  return Changed;
+}
+
 bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        const DeadLaneDetector &DLD) {
@@ -273,6 +298,8 @@ bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
 
     if (ST->enableSubRegLiveness() && isEarlyClobberMI(MI))
       Changed |= handleSubReg(MF, MI, DLD);
+    if (isEarlyClobberMI(MI))
+      Changed |= fixupUndefOperandOnly(&MI);
     if (MI.isImplicitDef()) {
       auto DstReg = MI.getOperand(0).getReg();
       if (isVectorRegClass(DstReg))
diff --git a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
index a72b8174d7a36ed..a131b14fa2ba52d 100644
--- a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
+++ b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
@@ -32,7 +32,7 @@ define void @foo(<vscale x 8 x i8> %0) #2 {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s s0, v10
 ; CHECK-NEXT:    vsetivli zero, 0, e8, m1, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v8, 0
+; CHECK-NEXT:    vslideup.vi v8, v9, 0
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s s1, v8
 ; CHECK-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index c15321057aeb86b..8f7923889b99e85 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -117,13 +117,13 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT:    vmclr.m v0
 ; SUBREGLIVENESS-NEXT:    li s0, 36
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v12, v0.t
 ; SUBREGLIVENESS-NEXT:    addi a0, sp, 16
 ; SUBREGLIVENESS-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    call func at plt
 ; SUBREGLIVENESS-NEXT:    li a0, 32
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vrgather.vv v16, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT:    vrgather.vv v16, v8, v12, v0.t
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
 ; SUBREGLIVENESS-NEXT:    csrr a1, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a1, a1, 3

From 4fc6b04f03e6adb9df769642cdf5705738cfdfee Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Fri, 8 Sep 2023 05:10:38 -0700
Subject: [PATCH 3/3] [RISCV] Merge handleImplicitDef and fixupUndefOperandOnly

Both follow the same pattern of replacing the operand with a PseudoRVVInitUndef.

This patch

1. simplifies the logic for finding the MachineInstrs that need to be fixed.
2. emits PseudoRVVInitUndef just before the instruction that uses the operand, to reduce register pressure (shorter LiveInterval); see the sketch below.
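
For point 2, the register-pressure effect comes from the insertion point passed to BuildMI: the init pseudo is created immediately before the instruction that consumes it, so the PseudoRVVInitUndef value is live only across that one use. A rough sketch (mirroring the fixupIllOperand helper in the diff below; MI is the early-clobber user, MO the offending operand, and TargetRegClass/Opcode are derived from MO's register class as in the patch):

    // Sketch only: passing MI as the insertion point places the init pseudo
    // immediately before its user, keeping the new value's live interval
    // as short as possible.
    Register NewReg = MRI->createVirtualRegister(TargetRegClass);
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(Opcode), NewReg);
    MO.setReg(NewReg);      // rewrite the use to the freshly initialized value
    MO.setIsUndef(false);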
---
 llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp   | 117 +++++++-----------
 .../RISCV/65704-illegal-instruction.ll        |   6 +-
 ...regalloc-last-chance-recoloring-failure.ll |   4 +-
 .../RISCV/rvv/undef-earlyclobber-chain.mir    |   4 +-
 4 files changed, 49 insertions(+), 82 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
index 706758a2200115b..2f838392eb6b34d 100644
--- a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
@@ -77,14 +77,13 @@ class RISCVInitUndef : public MachineFunctionPass {
 private:
   bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
                          const DeadLaneDetector &DLD);
-  bool handleImplicitDef(MachineBasicBlock &MBB,
-                         MachineBasicBlock::iterator &Inst);
   bool isVectorRegClass(const Register R);
   const TargetRegisterClass *
   getVRLargestSuperClass(const TargetRegisterClass *RC) const;
   bool handleSubReg(MachineFunction &MF, MachineInstr &MI,
                     const DeadLaneDetector &DLD);
-  bool fixupUndefOperandOnly(MachineInstr *MI);
+  bool fixupIllOperand(MachineInstr *MI, MachineOperand &MO);
+  bool handleReg(MachineInstr *MI);
 };
 
 } // end anonymous namespace
@@ -135,53 +134,32 @@ static bool isEarlyClobberMI(MachineInstr &MI) {
   });
 }
 
-bool RISCVInitUndef::handleImplicitDef(MachineBasicBlock &MBB,
-                                       MachineBasicBlock::iterator &Inst) {
-  assert(Inst->getOpcode() == TargetOpcode::IMPLICIT_DEF);
-
-  Register Reg = Inst->getOperand(0).getReg();
-  if (!Reg.isVirtual())
-    return false;
-
-  bool HasOtherUse = false;
-  SmallVector<MachineOperand *, 1> UseMOs;
-  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
-    if (isEarlyClobberMI(*MO.getParent())) {
-      if (MO.isUse() && !MO.isTied())
-        UseMOs.push_back(&MO);
-      else
-        HasOtherUse = true;
-    }
+static bool findImplictDefMIFromReg(Register Reg, MachineRegisterInfo *MRI) {
+  for (auto &DefMI : MRI->def_instructions(Reg)) {
+    if (DefMI.getOpcode() == TargetOpcode::IMPLICIT_DEF)
+      return true;
   }
+  return false;
+}
 
-  if (UseMOs.empty())
-    return false;
-
-  LLVM_DEBUG(
-      dbgs() << "Emitting PseudoRVVInitUndef for implicit vector register "
-             << Reg << '\n');
-
-  const TargetRegisterClass *TargetRegClass =
-    getVRLargestSuperClass(MRI->getRegClass(Reg));
-  unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
-
-  Register NewDest = Reg;
-  if (HasOtherUse) {
-    NewDest = MRI->createVirtualRegister(TargetRegClass);
-    // We don't have a way to update dead lanes, so keep track of the
-    // new register so that we avoid querying it later.
-    NewRegs.insert(NewDest);
-  }
-  BuildMI(MBB, Inst, Inst->getDebugLoc(), TII->get(Opcode), NewDest);
-
-  if (!HasOtherUse)
-    Inst = MBB.erase(Inst);
+bool RISCVInitUndef::handleReg(MachineInstr *MI) {
+  bool Changed = false;
+  for (auto &UseMO : MI->uses()) {
+    if (!UseMO.isReg())
+      continue;
+    if (UseMO.isTied())
+      continue;
+    if (!UseMO.getReg().isVirtual())
+      continue;
+    if (!isVectorRegClass(UseMO.getReg()))
+      continue;
+    if (UseMO.getReg() == 0)
+      continue;
 
-  for (auto MO : UseMOs) {
-    MO->setReg(NewDest);
-    MO->setIsUndef(false);
+    if (UseMO.isUndef() || findImplictDefMIFromReg(UseMO.getReg(), MRI))
+      Changed |= fixupIllOperand(MI, UseMO);
   }
-  return true;
+  return Changed;
 }
 
 bool RISCVInitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
@@ -246,28 +224,21 @@ bool RISCVInitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
   return Changed;
 }
 
-bool RISCVInitUndef::fixupUndefOperandOnly(MachineInstr *MI) {
-  bool Changed = false;
-  for (auto &UseMO : MI->uses()) {
-    if (!UseMO.isReg())
-      continue;
-    if (UseMO.isTied())
-      continue;
-    if (!UseMO.isUndef())
-      continue;
-    if (!isVectorRegClass(UseMO.getReg()))
-      continue;
-    const TargetRegisterClass *TargetRegClass =
-        getVRLargestSuperClass(MRI->getRegClass(UseMO.getReg()));
-    unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
-    Register NewReg = MRI->createVirtualRegister(TargetRegClass);
-    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(Opcode), NewReg);
-    UseMO.setReg(NewReg);
-    UseMO.setIsUndef(false);
-    Changed = true;
-  }
+bool RISCVInitUndef::fixupIllOperand(MachineInstr *MI, MachineOperand &MO) {
 
-  return Changed;
+  LLVM_DEBUG(
+      dbgs() << "Emitting PseudoRVVInitUndef for implicit vector register "
+             << MO.getReg() << '\n');
+
+  const TargetRegisterClass *TargetRegClass =
+      getVRLargestSuperClass(MRI->getRegClass(MO.getReg()));
+  unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
+  Register NewReg = MRI->createVirtualRegister(TargetRegClass);
+  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(Opcode), NewReg);
+  MO.setReg(NewReg);
+  if (MO.isUndef())
+    MO.setIsUndef(false);
+  return true;
 }
 
 bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
@@ -296,14 +267,10 @@ bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
       }
     }
 
-    if (ST->enableSubRegLiveness() && isEarlyClobberMI(MI))
-      Changed |= handleSubReg(MF, MI, DLD);
-    if (isEarlyClobberMI(MI))
-      Changed |= fixupUndefOperandOnly(&MI);
-    if (MI.isImplicitDef()) {
-      auto DstReg = MI.getOperand(0).getReg();
-      if (isVectorRegClass(DstReg))
-        Changed |= handleImplicitDef(MBB, I);
+    if (isEarlyClobberMI(MI)) {
+      if (ST->enableSubRegLiveness())
+        Changed |= handleSubReg(MF, MI, DLD);
+      Changed |= handleReg(&MI);
     }
   }
   return Changed;
diff --git a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
index a131b14fa2ba52d..780459bae1cea63 100644
--- a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
+++ b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
@@ -26,11 +26,11 @@ define void @foo(<vscale x 8 x i8> %0) #2 {
 ; CHECK-NEXT:    .cfi_offset s0, -16
 ; CHECK-NEXT:    .cfi_offset s1, -24
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vsetivli zero, 0, e8, m1, tu, ma
-; CHECK-NEXT:    vslideup.vi v10, v9, 0
+; CHECK-NEXT:    vslideup.vi v9, v10, 0
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s s0, v10
+; CHECK-NEXT:    vmv.x.s s0, v9
 ; CHECK-NEXT:    vsetivli zero, 0, e8, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 0
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 8f7923889b99e85..b7eac0ba4e4cc9c 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -40,7 +40,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    li s0, 36
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwadd.vv v16, v8, v8, v0.t
+; CHECK-NEXT:    vfwadd.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add a0, sp, a0
@@ -49,7 +49,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    call func at plt
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vrgather.vv v4, v8, v8, v0.t
+; CHECK-NEXT:    vrgather.vv v4, v8, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir
index 08ea967179ebf83..58b2687824aa146 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir
@@ -76,9 +76,9 @@ machineFunctionInfo:
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: undef_early_clobber_chain
-    ; CHECK: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
-    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
     ; CHECK-NEXT: early-clobber %1:vr = PseudoVRGATHER_VI_M1 undef [[DEF]], [[PseudoRVVInitUndefM1_]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY %1
     ; CHECK-NEXT: PseudoRET implicit $v8


