[llvm] [RISCV][VLOPT] Don't reduce the VL if it is the same as CommonVL (PR #123878)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 22 10:48:04 PST 2025


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/123878

>From 927f8ed5491f6d8916cf53637dde4378039cb179 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 21 Jan 2025 20:40:58 -0800
Subject: [PATCH 1/7] [RISCV][VLOPT] Don't reduce the VL if it is the same
 as CommonVL

This fixes #123862.
---
 llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 52 ++++++++++++++++++++--
 llvm/test/CodeGen/RISCV/rvv/vl-opt.ll      | 11 +++++
 2 files changed, 60 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 66d26bf5b11e2d..e475648f4c08cf 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1308,9 +1308,55 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI) {
   unsigned VLOpNum = RISCVII::getVLOpNum(MI.getDesc());
   MachineOperand &VLOp = MI.getOperand(VLOpNum);
 
-  if (!RISCV::isVLKnownLE(*CommonVL, VLOp)) {
-    LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not <= VLOp.\n");
-    return false;
+    unsigned VLOpNum = RISCVII::getVLOpNum(MI.getDesc());
+    MachineOperand &VLOp = MI.getOperand(VLOpNum);
+
+    if (!RISCV::isVLKnownLE(*CommonVL, VLOp)) {
+      LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not <= VLOp.\n");
+      continue;
+    }
+
+    if (CommonVL->isImm()) {
+      if (CommonVL->isImm() && VLOp.isImm() &&
+          VLOp.getImm() == CommonVL->getImm()) {
+        LLVM_DEBUG(dbgs() << "  VL is already reduced to" << VLOp << " for "
+                          << MI << "\n");
+        continue;
+      }
+
+      LLVM_DEBUG(dbgs() << "  Reduce VL from " << VLOp << " to "
+                        << CommonVL->getImm() << " for " << MI << "\n");
+      VLOp.ChangeToImmediate(CommonVL->getImm());
+    } else {
+      const MachineInstr *VLMI = MRI->getVRegDef(CommonVL->getReg());
+      if (!MDT->dominates(VLMI, &MI))
+        continue;
+      LLVM_DEBUG(
+          dbgs() << "  Reduce VL from " << VLOp << " to "
+                 << printReg(CommonVL->getReg(), MRI->getTargetRegisterInfo())
+                 << " for " << MI << "\n");
+
+      // All our checks passed. We can reduce VL.
+      VLOp.ChangeToRegister(CommonVL->getReg(), false);
+    }
+
+    MadeChange = true;
+
+    // Now add all inputs to this instruction to the worklist.
+    for (auto &Op : MI.operands()) {
+      if (!Op.isReg() || !Op.isUse() || !Op.getReg().isVirtual())
+        continue;
+
+      if (!isVectorRegClass(Op.getReg(), MRI))
+        continue;
+
+      MachineInstr *DefMI = MRI->getVRegDef(Op.getReg());
+
+      if (!isCandidate(*DefMI))
+        continue;
+
+      Worklist.insert(DefMI);
+    }
   }
 
   if (CommonVL->isImm()) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
index 1cc30f077feb4a..d6143f69288e66 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
@@ -150,3 +150,14 @@ define <vscale x 4 x i32> @dont_optimize_tied_def(<vscale x 4 x i32> %a, <vscale
   ret <vscale x 4 x i32> %2
 }
 
+define <vscale x 4 x i32> @same_vl_imm(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: same_vl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v12
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 4)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen 4)
+  ret <vscale x 4 x i32> %w
+}

>From d72d13ee4b63efe0431cb67a453682f80b9ae222 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 21 Jan 2025 20:49:55 -0800
Subject: [PATCH 2/7] fixup! generalize to regs

---
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp   | 15 +++++++++++++++
 llvm/lib/Target/RISCV/RISCVInstrInfo.h     |  3 +++
 llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 11 ++---------
 llvm/test/CodeGen/RISCV/rvv/vl-opt.ll      | 12 ++++++++++++
 4 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 471cd15ee9c870..60946809e2c60f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -4243,6 +4243,21 @@ bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
   return LHS.getImm() <= RHS.getImm();
 }
 
+/// Given two VL operands, do we know that LHS < RHS?
+bool RISCV::isVLKnownLT(const MachineOperand &LHS, const MachineOperand &RHS) {
+  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
+      LHS.getReg() == RHS.getReg())
+    return false;
+  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel && LHS.isImm() &&
+      LHS.getImm() != RHS.getImm())
+    return true;
+  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
+    return false;
+  if (!LHS.isImm() || !RHS.isImm())
+    return false;
+  return LHS.getImm() < RHS.getImm();
+}
+
 namespace {
 class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
   const MachineInstr *LHS;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 1c81719c767ecb..e345aca377f2fa 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -356,6 +356,9 @@ static constexpr int64_t VLMaxSentinel = -1LL;
 /// Given two VL operands, do we know that LHS <= RHS?
 bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS);
 
+/// Given two VL operands, do we know that LHS < RHS?
+bool isVLKnownLT(const MachineOperand &LHS, const MachineOperand &RHS);
+
 // Mask assignments for floating-point
 static constexpr unsigned FPMASK_Negative_Infinity = 0x001;
 static constexpr unsigned FPMASK_Negative_Normal = 0x002;
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index e475648f4c08cf..b8cfe0b445d9cd 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1311,19 +1311,12 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI) {
     unsigned VLOpNum = RISCVII::getVLOpNum(MI.getDesc());
     MachineOperand &VLOp = MI.getOperand(VLOpNum);
 
-    if (!RISCV::isVLKnownLE(*CommonVL, VLOp)) {
-      LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not <= VLOp.\n");
+    if (!RISCV::isVLKnownLT(*CommonVL, VLOp)) {
+      LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not < VLOp.\n");
       continue;
     }
 
     if (CommonVL->isImm()) {
-      if (CommonVL->isImm() && VLOp.isImm() &&
-          VLOp.getImm() == CommonVL->getImm()) {
-        LLVM_DEBUG(dbgs() << "  VL is already reduced to" << VLOp << " for "
-                          << MI << "\n");
-        continue;
-      }
-
       LLVM_DEBUG(dbgs() << "  Reduce VL from " << VLOp << " to "
                         << CommonVL->getImm() << " for " << MI << "\n");
       VLOp.ChangeToImmediate(CommonVL->getImm());
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
index d6143f69288e66..2599571dd78532 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
@@ -161,3 +161,15 @@ define <vscale x 4 x i32> @same_vl_imm(<vscale x 4 x i32> %passthru, <vscale x 4
   %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen 4)
   ret <vscale x 4 x i32> %w
 }
+
+define <vscale x 4 x i32> @same_vl_reg(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+; CHECK-LABEL: same_vl_reg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v12
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen %vl)
+  ret <vscale x 4 x i32> %w
+}

>From faf1893f3f807f202fe60e123a1b1ec909004b98 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 22 Jan 2025 09:35:03 -0800
Subject: [PATCH 3/7] fixup! add slow test case

---
 .../CodeGen/RISCV/rvv/vlopt-slow-case.mir     | 299 ++++++++++++++++++
 1 file changed, 299 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir b/llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir
new file mode 100644
index 00000000000000..6efb657df464bf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir
@@ -0,0 +1,299 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v -run-pass=riscv-vl-optimizer -verify-machineinstrs | FileCheck %s
+
+---
+name: test
+body: |
+  bb.0:
+    liveins: $x11
+
+    ; CHECK-LABEL: name: test
+    ; CHECK: liveins: $x11
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[LD:%[0-9]+]]:gpr = LD [[COPY]], 32
+    ; CHECK-NEXT: [[LD1:%[0-9]+]]:gpr = LD [[LD]], 0
+    ; CHECK-NEXT: [[LD2:%[0-9]+]]:gpr = LD [[LD]], 8
+    ; CHECK-NEXT: [[LD3:%[0-9]+]]:gpr = LD [[LD]], 16
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI killed [[LD3]], 64
+    ; CHECK-NEXT: [[LW:%[0-9]+]]:gpr = LW [[LD1]], 4
+    ; CHECK-NEXT: [[LW1:%[0-9]+]]:gpr = LW [[LD1]], 0
+    ; CHECK-NEXT: [[LWU:%[0-9]+]]:gpr = LWU [[LD1]], 12
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:gpr = XOR [[LW]], [[LW1]]
+    ; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 113938
+    ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI killed [[LUI]], -1062
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:gpr = XOR killed [[XOR]], killed [[ADDI1]]
+    ; CHECK-NEXT: [[LW2:%[0-9]+]]:gpr = LW [[LD1]], 8
+    ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI killed [[LW2]], 32
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:gpr = OR killed [[SLLI]], killed [[LWU]]
+    ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 $noreg, 4, 6 /* e64 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 $noreg, killed [[PseudoVID_V_M1_]], killed [[OR]], 4, 6 /* e64 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVNSRL_WI_MF2_:%[0-9]+]]:vr = PseudoVNSRL_WI_MF2 $noreg, [[PseudoVADD_VX_M1_]], 0, 4, 5 /* e32 */, 3 /* ta, ma */
+    ; CHECK-NEXT: [[ADDI2:%[0-9]+]]:gpr = ADDI $x0, 32
+    ; CHECK-NEXT: [[PseudoVNSRL_WX_MF2_:%[0-9]+]]:vr = PseudoVNSRL_WX_MF2 $noreg, [[PseudoVADD_VX_M1_]], killed [[ADDI2]], 4, 5 /* e32 */, 3 /* ta, ma */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVNSRL_WI_MF2_]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_1:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVNSRL_WX_MF2_]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_]], [[PseudoVADD_VX_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VX_MF2_1]], 19, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VX_MF2_1]], 13, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_]], killed [[PseudoVSRL_VI_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_]], killed [[PseudoVOR_VV_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_1:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_]], [[PseudoVADD_VV_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_1:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_1:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_1:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_1]], killed [[PseudoVSRL_VI_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_1:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_1]], killed [[PseudoVOR_VV_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_2:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_1]], [[PseudoVADD_VV_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_2:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_1]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_2:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_1]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_2:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_2]], killed [[PseudoVSRL_VI_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_2:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_2]], killed [[PseudoVOR_VV_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_3:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_2]], [[PseudoVADD_VV_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_3:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_2]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_3:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_2]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_3:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_3]], killed [[PseudoVSRL_VI_MF2_3]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_3:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_3]], killed [[PseudoVOR_VV_MF2_3]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_2:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_3]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_3:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_3]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_3]], 1, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_4:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_2]], [[PseudoVADD_VI_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_4:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_4:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_4:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_4]], killed [[PseudoVSRL_VI_MF2_4]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_4:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_4]], killed [[PseudoVOR_VV_MF2_4]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_5:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_4]], [[PseudoVADD_VV_MF2_4]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_5:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_4]], 3, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_5:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_4]], 29, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_5:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_5]], killed [[PseudoVSRL_VI_MF2_5]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_5:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_5]], killed [[PseudoVOR_VV_MF2_5]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_6:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_5]], [[PseudoVADD_VV_MF2_5]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_6:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_5]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_6:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_5]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_6:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_6]], killed [[PseudoVSRL_VI_MF2_6]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_6:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_6]], killed [[PseudoVOR_VV_MF2_6]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_7:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_6]], [[PseudoVADD_VV_MF2_6]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_7:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_6]], 8, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_7:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_6]], 24, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_7:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_7]], killed [[PseudoVSRL_VI_MF2_7]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_7:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_7]], killed [[PseudoVOR_VV_MF2_7]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_4:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_7]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_5:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_7]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_1:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_5]], 2, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_8:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_4]], [[PseudoVADD_VI_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_8:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_1]], 19, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_8:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_1]], 13, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_8:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_8]], killed [[PseudoVSRL_VI_MF2_8]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_8:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_8]], killed [[PseudoVOR_VV_MF2_8]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_9:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_8]], [[PseudoVADD_VV_MF2_8]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_9:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_8]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_9:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_8]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_9:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_9]], killed [[PseudoVSRL_VI_MF2_9]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_9:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_9]], killed [[PseudoVOR_VV_MF2_9]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_10:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_9]], [[PseudoVADD_VV_MF2_9]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_10:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_9]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_10:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_9]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_10:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_10]], killed [[PseudoVSRL_VI_MF2_10]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_10:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_10]], killed [[PseudoVOR_VV_MF2_10]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_11:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_10]], [[PseudoVADD_VV_MF2_10]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_11:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_10]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_11:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_10]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_11:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_11]], killed [[PseudoVSRL_VI_MF2_11]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_11:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_11]], killed [[PseudoVOR_VV_MF2_11]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_6:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_11]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_7:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_11]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_2:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_7]], 3, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_12:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_6]], [[PseudoVADD_VI_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_12:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_2]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_12:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_2]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_12:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_12]], killed [[PseudoVSRL_VI_MF2_12]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_12:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_12]], killed [[PseudoVOR_VV_MF2_12]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_13:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_12]], [[PseudoVADD_VV_MF2_12]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_13:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_12]], 3, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_13:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_12]], 29, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_13:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_13]], killed [[PseudoVSRL_VI_MF2_13]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_13:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_13]], killed [[PseudoVOR_VV_MF2_13]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_14:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_13]], [[PseudoVADD_VV_MF2_13]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_14:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_13]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_14:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_13]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_14:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_14]], killed [[PseudoVSRL_VI_MF2_14]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_14:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_14]], killed [[PseudoVOR_VV_MF2_14]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_15:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_14]], [[PseudoVADD_VV_MF2_14]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_15:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_14]], 8, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_15:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_14]], 24, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_15:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_15]], killed [[PseudoVSRL_VI_MF2_15]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_15:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_15]], killed [[PseudoVOR_VV_MF2_15]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_8:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_15]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_9:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_15]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_3:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_9]], 4, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_16:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_8]], [[PseudoVADD_VI_MF2_3]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_16:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_3]], 19, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_16:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_3]], 13, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_16:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_16]], killed [[PseudoVSRL_VI_MF2_16]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_16:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_16]], killed [[PseudoVOR_VV_MF2_16]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_17:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_16]], [[PseudoVADD_VV_MF2_16]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_17:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_16]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_17:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_16]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_17:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_17]], killed [[PseudoVSRL_VI_MF2_17]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_17:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_17]], killed [[PseudoVOR_VV_MF2_17]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_18:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_17]], [[PseudoVADD_VV_MF2_17]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_18:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_17]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_18:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_17]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_18:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_18]], killed [[PseudoVSRL_VI_MF2_18]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_18:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_18]], killed [[PseudoVOR_VV_MF2_18]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_19:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_18]], [[PseudoVADD_VV_MF2_18]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_19:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_18]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_19:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_18]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_19:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_19]], killed [[PseudoVSRL_VI_MF2_19]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_19:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_19]], killed [[PseudoVOR_VV_MF2_19]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_10:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_19]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_11:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_19]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_4:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_11]], 5, 4, 5 /* e32 */, 1 /* ta, mu */
+    ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_MF2_10]], killed [[LD2]], 4, 5 /* e32 */
+    ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VI_MF2_4]], killed [[ADDI]], 4, 5 /* e32 */
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: $x10 = COPY [[COPY1]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %1:gpr = COPY $x11
+    %3:gpr = LD %1, 32
+    %4:gpr = LD %3, 0
+    %5:gpr = LD %3, 8
+    %6:gpr = LD %3, 16
+    %7:gpr = ADDI killed %6, 64
+    %8:gpr = LW %4, 4
+    %9:gpr = LW %4, 0
+    %10:gpr = LWU %4, 12
+    %11:gpr = XOR %8, %9
+    %12:gpr = LUI 113938
+    %13:gpr = ADDI killed %12, -1062
+    %14:gpr = XOR killed %11, killed %13
+    %15:gpr = LW %4, 8
+    %16:gpr = SLLI killed %15, 32
+    %17:gpr = OR killed %16, killed %10
+    %18:vr = PseudoVID_V_M1 $noreg, 4, 6 /* e64 */, 1 /* ta, mu */
+    %19:vr = PseudoVADD_VX_M1 $noreg, killed %18, killed %17, 4, 6 /* e64 */, 1 /* ta, mu */
+    %20:vr = PseudoVNSRL_WI_MF2 $noreg, %19, 0, 4, 5 /* e32 */, 3 /* ta, ma */
+    %21:gpr = ADDI $x0, 32
+    %22:vr = PseudoVNSRL_WX_MF2 $noreg, %19, killed %21, 4, 5 /* e32 */, 3 /* ta, ma */
+    %23:vr = PseudoVADD_VX_MF2 $noreg, killed %20, %9, 4, 5 /* e32 */, 1 /* ta, mu */
+    %24:vr = PseudoVADD_VX_MF2 $noreg, killed %22, %8, 4, 5 /* e32 */, 1 /* ta, mu */
+    %25:vr = PseudoVADD_VV_MF2 $noreg, killed %23, %24, 4, 5 /* e32 */, 1 /* ta, mu */
+    %26:vr = PseudoVSRL_VI_MF2 $noreg, %24, 19, 4, 5 /* e32 */, 1 /* ta, mu */
+    %27:vr = PseudoVSLL_VI_MF2 $noreg, %24, 13, 4, 5 /* e32 */, 1 /* ta, mu */
+    %28:vr = PseudoVOR_VV_MF2 $noreg, killed %27, killed %26, 4, 5 /* e32 */, 1 /* ta, mu */
+    %29:vr = PseudoVXOR_VV_MF2 $noreg, %25, killed %28, 4, 5 /* e32 */, 1 /* ta, mu */
+    %30:vr = PseudoVADD_VV_MF2 $noreg, %29, %25, 4, 5 /* e32 */, 1 /* ta, mu */
+    %31:vr = PseudoVSRL_VI_MF2 $noreg, %29, 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    %32:vr = PseudoVSLL_VI_MF2 $noreg, %29, 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    %33:vr = PseudoVOR_VV_MF2 $noreg, killed %32, killed %31, 4, 5 /* e32 */, 1 /* ta, mu */
+    %34:vr = PseudoVXOR_VV_MF2 $noreg, %30, killed %33, 4, 5 /* e32 */, 1 /* ta, mu */
+    %35:vr = PseudoVADD_VV_MF2 $noreg, %34, %30, 4, 5 /* e32 */, 1 /* ta, mu */
+    %36:vr = PseudoVSRL_VI_MF2 $noreg, %34, 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    %37:vr = PseudoVSLL_VI_MF2 $noreg, %34, 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    %38:vr = PseudoVOR_VV_MF2 $noreg, killed %37, killed %36, 4, 5 /* e32 */, 1 /* ta, mu */
+    %39:vr = PseudoVXOR_VV_MF2 $noreg, %35, killed %38, 4, 5 /* e32 */, 1 /* ta, mu */
+    %40:vr = PseudoVADD_VV_MF2 $noreg, %39, %35, 4, 5 /* e32 */, 1 /* ta, mu */
+    %41:vr = PseudoVSRL_VI_MF2 $noreg, %39, 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    %42:vr = PseudoVSLL_VI_MF2 $noreg, %39, 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    %43:vr = PseudoVOR_VV_MF2 $noreg, killed %42, killed %41, 4, 5 /* e32 */, 1 /* ta, mu */
+    %44:vr = PseudoVXOR_VV_MF2 $noreg, %40, killed %43, 4, 5 /* e32 */, 1 /* ta, mu */
+    %45:vr = PseudoVADD_VX_MF2 $noreg, %40, %8, 4, 5 /* e32 */, 1 /* ta, mu */
+    %46:vr = PseudoVADD_VX_MF2 $noreg, killed %44, %14, 4, 5 /* e32 */, 1 /* ta, mu */
+    %47:vr = PseudoVADD_VI_MF2 $noreg, killed %46, 1, 4, 5 /* e32 */, 1 /* ta, mu */
+    %48:vr = PseudoVADD_VV_MF2 $noreg, killed %45, %47, 4, 5 /* e32 */, 1 /* ta, mu */
+    %49:vr = PseudoVSRL_VI_MF2 $noreg, %47, 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    %50:vr = PseudoVSLL_VI_MF2 $noreg, %47, 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    %51:vr = PseudoVOR_VV_MF2 $noreg, killed %50, killed %49, 4, 5 /* e32 */, 1 /* ta, mu */
+    %52:vr = PseudoVXOR_VV_MF2 $noreg, %48, killed %51, 4, 5 /* e32 */, 1 /* ta, mu */
+    %53:vr = PseudoVADD_VV_MF2 $noreg, %52, %48, 4, 5 /* e32 */, 1 /* ta, mu */
+    %54:vr = PseudoVSRL_VI_MF2 $noreg, %52, 3, 4, 5 /* e32 */, 1 /* ta, mu */
+    %55:vr = PseudoVSLL_VI_MF2 $noreg, %52, 29, 4, 5 /* e32 */, 1 /* ta, mu */
+    %56:vr = PseudoVOR_VV_MF2 $noreg, killed %55, killed %54, 4, 5 /* e32 */, 1 /* ta, mu */
+    %57:vr = PseudoVXOR_VV_MF2 $noreg, %53, killed %56, 4, 5 /* e32 */, 1 /* ta, mu */
+    %58:vr = PseudoVADD_VV_MF2 $noreg, %57, %53, 4, 5 /* e32 */, 1 /* ta, mu */
+    %59:vr = PseudoVSRL_VI_MF2 $noreg, %57, 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    %60:vr = PseudoVSLL_VI_MF2 $noreg, %57, 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    %61:vr = PseudoVOR_VV_MF2 $noreg, killed %60, killed %59, 4, 5 /* e32 */, 1 /* ta, mu */
+    %62:vr = PseudoVXOR_VV_MF2 $noreg, %58, killed %61, 4, 5 /* e32 */, 1 /* ta, mu */
+    %63:vr = PseudoVADD_VV_MF2 $noreg, %62, %58, 4, 5 /* e32 */, 1 /* ta, mu */
+    %64:vr = PseudoVSRL_VI_MF2 $noreg, %62, 8, 4, 5 /* e32 */, 1 /* ta, mu */
+    %65:vr = PseudoVSLL_VI_MF2 $noreg, %62, 24, 4, 5 /* e32 */, 1 /* ta, mu */
+    %66:vr = PseudoVOR_VV_MF2 $noreg, killed %65, killed %64, 4, 5 /* e32 */, 1 /* ta, mu */
+    %67:vr = PseudoVXOR_VV_MF2 $noreg, %63, killed %66, 4, 5 /* e32 */, 1 /* ta, mu */
+    %68:vr = PseudoVADD_VX_MF2 $noreg, %63, %14, 4, 5 /* e32 */, 1 /* ta, mu */
+    %69:vr = PseudoVADD_VX_MF2 $noreg, killed %67, %9, 4, 5 /* e32 */, 1 /* ta, mu */
+    %70:vr = PseudoVADD_VI_MF2 $noreg, killed %69, 2, 4, 5 /* e32 */, 1 /* ta, mu */
+    %71:vr = PseudoVADD_VV_MF2 $noreg, killed %68, %70, 4, 5 /* e32 */, 1 /* ta, mu */
+    %72:vr = PseudoVSRL_VI_MF2 $noreg, %70, 19, 4, 5 /* e32 */, 1 /* ta, mu */
+    %73:vr = PseudoVSLL_VI_MF2 $noreg, %70, 13, 4, 5 /* e32 */, 1 /* ta, mu */
+    %74:vr = PseudoVOR_VV_MF2 $noreg, killed %73, killed %72, 4, 5 /* e32 */, 1 /* ta, mu */
+    %75:vr = PseudoVXOR_VV_MF2 $noreg, %71, killed %74, 4, 5 /* e32 */, 1 /* ta, mu */
+    %76:vr = PseudoVADD_VV_MF2 $noreg, %75, %71, 4, 5 /* e32 */, 1 /* ta, mu */
+    %77:vr = PseudoVSRL_VI_MF2 $noreg, %75, 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    %78:vr = PseudoVSLL_VI_MF2 $noreg, %75, 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    %79:vr = PseudoVOR_VV_MF2 $noreg, killed %78, killed %77, 4, 5 /* e32 */, 1 /* ta, mu */
+    %80:vr = PseudoVXOR_VV_MF2 $noreg, %76, killed %79, 4, 5 /* e32 */, 1 /* ta, mu */
+    %81:vr = PseudoVADD_VV_MF2 $noreg, %80, %76, 4, 5 /* e32 */, 1 /* ta, mu */
+    %82:vr = PseudoVSRL_VI_MF2 $noreg, %80, 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    %83:vr = PseudoVSLL_VI_MF2 $noreg, %80, 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    %84:vr = PseudoVOR_VV_MF2 $noreg, killed %83, killed %82, 4, 5 /* e32 */, 1 /* ta, mu */
+    %85:vr = PseudoVXOR_VV_MF2 $noreg, %81, killed %84, 4, 5 /* e32 */, 1 /* ta, mu */
+    %86:vr = PseudoVADD_VV_MF2 $noreg, %85, %81, 4, 5 /* e32 */, 1 /* ta, mu */
+    %87:vr = PseudoVSRL_VI_MF2 $noreg, %85, 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    %88:vr = PseudoVSLL_VI_MF2 $noreg, %85, 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    %89:vr = PseudoVOR_VV_MF2 $noreg, killed %88, killed %87, 4, 5 /* e32 */, 1 /* ta, mu */
+    %90:vr = PseudoVXOR_VV_MF2 $noreg, %86, killed %89, 4, 5 /* e32 */, 1 /* ta, mu */
+    %91:vr = PseudoVADD_VX_MF2 $noreg, %86, %9, 4, 5 /* e32 */, 1 /* ta, mu */
+    %92:vr = PseudoVADD_VX_MF2 $noreg, killed %90, %8, 4, 5 /* e32 */, 1 /* ta, mu */
+    %93:vr = PseudoVADD_VI_MF2 $noreg, killed %92, 3, 4, 5 /* e32 */, 1 /* ta, mu */
+    %94:vr = PseudoVADD_VV_MF2 $noreg, killed %91, %93, 4, 5 /* e32 */, 1 /* ta, mu */
+    %95:vr = PseudoVSRL_VI_MF2 $noreg, %93, 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    %96:vr = PseudoVSLL_VI_MF2 $noreg, %93, 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    %97:vr = PseudoVOR_VV_MF2 $noreg, killed %96, killed %95, 4, 5 /* e32 */, 1 /* ta, mu */
+    %98:vr = PseudoVXOR_VV_MF2 $noreg, %94, killed %97, 4, 5 /* e32 */, 1 /* ta, mu */
+    %99:vr = PseudoVADD_VV_MF2 $noreg, %98, %94, 4, 5 /* e32 */, 1 /* ta, mu */
+    %100:vr = PseudoVSRL_VI_MF2 $noreg, %98, 3, 4, 5 /* e32 */, 1 /* ta, mu */
+    %101:vr = PseudoVSLL_VI_MF2 $noreg, %98, 29, 4, 5 /* e32 */, 1 /* ta, mu */
+    %102:vr = PseudoVOR_VV_MF2 $noreg, killed %101, killed %100, 4, 5 /* e32 */, 1 /* ta, mu */
+    %103:vr = PseudoVXOR_VV_MF2 $noreg, %99, killed %102, 4, 5 /* e32 */, 1 /* ta, mu */
+    %104:vr = PseudoVADD_VV_MF2 $noreg, %103, %99, 4, 5 /* e32 */, 1 /* ta, mu */
+    %105:vr = PseudoVSRL_VI_MF2 $noreg, %103, 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    %106:vr = PseudoVSLL_VI_MF2 $noreg, %103, 16, 4, 5 /* e32 */, 1 /* ta, mu */
+    %107:vr = PseudoVOR_VV_MF2 $noreg, killed %106, killed %105, 4, 5 /* e32 */, 1 /* ta, mu */
+    %108:vr = PseudoVXOR_VV_MF2 $noreg, %104, killed %107, 4, 5 /* e32 */, 1 /* ta, mu */
+    %109:vr = PseudoVADD_VV_MF2 $noreg, %108, %104, 4, 5 /* e32 */, 1 /* ta, mu */
+    %110:vr = PseudoVSRL_VI_MF2 $noreg, %108, 8, 4, 5 /* e32 */, 1 /* ta, mu */
+    %111:vr = PseudoVSLL_VI_MF2 $noreg, %108, 24, 4, 5 /* e32 */, 1 /* ta, mu */
+    %112:vr = PseudoVOR_VV_MF2 $noreg, killed %111, killed %110, 4, 5 /* e32 */, 1 /* ta, mu */
+    %113:vr = PseudoVXOR_VV_MF2 $noreg, %109, killed %112, 4, 5 /* e32 */, 1 /* ta, mu */
+    %114:vr = PseudoVADD_VX_MF2 $noreg, %109, %8, 4, 5 /* e32 */, 1 /* ta, mu */
+    %115:vr = PseudoVADD_VX_MF2 $noreg, killed %113, %14, 4, 5 /* e32 */, 1 /* ta, mu */
+    %116:vr = PseudoVADD_VI_MF2 $noreg, killed %115, 4, 4, 5 /* e32 */, 1 /* ta, mu */
+    %117:vr = PseudoVADD_VV_MF2 $noreg, killed %114, %116, 4, 5 /* e32 */, 1 /* ta, mu */
+    %118:vr = PseudoVSRL_VI_MF2 $noreg, %116, 19, 4, 5 /* e32 */, 1 /* ta, mu */
+    %119:vr = PseudoVSLL_VI_MF2 $noreg, %116, 13, 4, 5 /* e32 */, 1 /* ta, mu */
+    %120:vr = PseudoVOR_VV_MF2 $noreg, killed %119, killed %118, 4, 5 /* e32 */, 1 /* ta, mu */
+    %121:vr = PseudoVXOR_VV_MF2 $noreg, %117, killed %120, 4, 5 /* e32 */, 1 /* ta, mu */
+    %122:vr = PseudoVADD_VV_MF2 $noreg, %121, %117, 4, 5 /* e32 */, 1 /* ta, mu */
+    %123:vr = PseudoVSRL_VI_MF2 $noreg, %121, 17, 4, 5 /* e32 */, 1 /* ta, mu */
+    %124:vr = PseudoVSLL_VI_MF2 $noreg, %121, 15, 4, 5 /* e32 */, 1 /* ta, mu */
+    %125:vr = PseudoVOR_VV_MF2 $noreg, killed %124, killed %123, 4, 5 /* e32 */, 1 /* ta, mu */
+    %126:vr = PseudoVXOR_VV_MF2 $noreg, %122, killed %125, 4, 5 /* e32 */, 1 /* ta, mu */
+    %127:vr = PseudoVADD_VV_MF2 $noreg, %126, %122, 4, 5 /* e32 */, 1 /* ta, mu */
+    %128:vr = PseudoVSRL_VI_MF2 $noreg, %126, 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    %129:vr = PseudoVSLL_VI_MF2 $noreg, %126, 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    %130:vr = PseudoVOR_VV_MF2 $noreg, killed %129, killed %128, 4, 5 /* e32 */, 1 /* ta, mu */
+    %131:vr = PseudoVXOR_VV_MF2 $noreg, %127, killed %130, 4, 5 /* e32 */, 1 /* ta, mu */
+    %132:vr = PseudoVADD_VV_MF2 $noreg, %131, %127, 4, 5 /* e32 */, 1 /* ta, mu */
+    %133:vr = PseudoVSRL_VI_MF2 $noreg, %131, 26, 4, 5 /* e32 */, 1 /* ta, mu */
+    %134:vr = PseudoVSLL_VI_MF2 $noreg, %131, 6, 4, 5 /* e32 */, 1 /* ta, mu */
+    %135:vr = PseudoVOR_VV_MF2 $noreg, killed %134, killed %133, 4, 5 /* e32 */, 1 /* ta, mu */
+    %136:vr = PseudoVXOR_VV_MF2 $noreg, %132, killed %135, 4, 5 /* e32 */, 1 /* ta, mu */
+    %137:vr = PseudoVADD_VX_MF2 $noreg, %132, %14, 4, 5 /* e32 */, 1 /* ta, mu */
+    %138:vr = PseudoVADD_VX_MF2 $noreg, killed %136, %9, 4, 5 /* e32 */, 1 /* ta, mu */
+    %139:vr = PseudoVADD_VI_MF2 $noreg, killed %138, 5, 4, 5 /* e32 */, 1 /* ta, mu */
+    PseudoVSE32_V_MF2 killed %137, killed %5, 4, 5 /* e32 */
+    PseudoVSE32_V_MF2 killed %139, killed %7, 4, 5 /* e32 */
+    %140:gpr = COPY $x0
+    $x10 = COPY %140
+    PseudoRET implicit $x10
+...

>From 2a14cd1f89b12baa5231c199d37c66d9e15fba0f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 22 Jan 2025 09:43:42 -0800
Subject: [PATCH 4/7] fixup! respond to review

---
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp   | 15 ---------------
 llvm/lib/Target/RISCV/RISCVInstrInfo.h     |  3 ---
 llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 11 +++++++++--
 3 files changed, 9 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 60946809e2c60f..471cd15ee9c870 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -4243,21 +4243,6 @@ bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
   return LHS.getImm() <= RHS.getImm();
 }
 
-/// Given two VL operands, do we know that LHS < RHS?
-bool RISCV::isVLKnownLT(const MachineOperand &LHS, const MachineOperand &RHS) {
-  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
-      LHS.getReg() == RHS.getReg())
-    return false;
-  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel && LHS.isImm() &&
-      LHS.getImm() != RHS.getImm())
-    return true;
-  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
-    return false;
-  if (!LHS.isImm() || !RHS.isImm())
-    return false;
-  return LHS.getImm() < RHS.getImm();
-}
-
 namespace {
 class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
   const MachineInstr *LHS;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index e345aca377f2fa..1c81719c767ecb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -356,9 +356,6 @@ static constexpr int64_t VLMaxSentinel = -1LL;
 /// Given two VL operands, do we know that LHS <= RHS?
 bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS);
 
-/// Given two VL operands, do we know that LHS < RHS?
-bool isVLKnownLT(const MachineOperand &LHS, const MachineOperand &RHS);
-
 // Mask assignments for floating-point
 static constexpr unsigned FPMASK_Negative_Infinity = 0x001;
 static constexpr unsigned FPMASK_Negative_Normal = 0x002;
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index b8cfe0b445d9cd..6f783ed15e5885 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1311,8 +1311,15 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI) {
     unsigned VLOpNum = RISCVII::getVLOpNum(MI.getDesc());
     MachineOperand &VLOp = MI.getOperand(VLOpNum);
 
-    if (!RISCV::isVLKnownLT(*CommonVL, VLOp)) {
-      LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not < VLOp.\n");
+    if (!RISCV::isVLKnownLE(*CommonVL, VLOp)) {
+      LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not <= VLOp.\n");
+      continue;
+    }
+
+    if (CommonVL->isIdenticalTo(VLOp)) {
+      LLVM_DEBUG(
+          dbgs()
+          << "    Abort due to CommonVL == VLOp, no point in reducing.\n");
       continue;
     }
 

>From 296dee67a40ce2f06314738a39796770786c5afb Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 22 Jan 2025 09:58:16 -0800
Subject: [PATCH 5/7] fixup! useful test case

---
 llvm/test/CodeGen/RISCV/rvv/vl-opt.ll         |  23 --
 llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll  |  20 ++
 .../CodeGen/RISCV/rvv/vlopt-slow-case.mir     | 299 ------------------
 3 files changed, 20 insertions(+), 322 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir

diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
index 2599571dd78532..1cc30f077feb4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
@@ -150,26 +150,3 @@ define <vscale x 4 x i32> @dont_optimize_tied_def(<vscale x 4 x i32> %a, <vscale
   ret <vscale x 4 x i32> %2
 }
 
-define <vscale x 4 x i32> @same_vl_imm(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: same_vl_imm:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v10, v12
-; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 4)
-  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen 4)
-  ret <vscale x 4 x i32> %w
-}
-
-define <vscale x 4 x i32> @same_vl_reg(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
-; CHECK-LABEL: same_vl_reg:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v10, v12
-; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl)
-  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen %vl)
-  ret <vscale x 4 x i32> %w
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll b/llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll
new file mode 100644
index 00000000000000..7b948593abf74d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-enable-vl-optimizer \
+; RUN:   -verify-machineinstrs -debug-only=riscv-vl-optimizer -o - 2>&1 %s | FileCheck %s
+
+; REQUIRES: asserts
+
+define <vscale x 4 x i32> @same_vl_imm(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+  ; CHECK: User VL is: 4
+  ; CHECK-NEXT: Abort due to CommonVL == VLOp, no point in reducing.
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 4)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, i64 4)
+  ret <vscale x 4 x i32> %w
+}
+
+define <vscale x 4 x i32> @same_vl_reg(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 %vl) {
+  ; CHECK: User VL is: %3:gprnox0
+  ; CHECK-NEXT: Abort due to CommonVL == VLOp, no point in reducing.
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 %vl)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, i64 %vl)
+  ret <vscale x 4 x i32> %w
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir b/llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir
deleted file mode 100644
index 6efb657df464bf..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vlopt-slow-case.mir
+++ /dev/null
@@ -1,299 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v -run-pass=riscv-vl-optimizer -verify-machineinstrs | FileCheck %s
-
----
-name: test
-body: |
-  bb.0:
-    liveins: $x11
-
-    ; CHECK-LABEL: name: test
-    ; CHECK: liveins: $x11
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[LD:%[0-9]+]]:gpr = LD [[COPY]], 32
-    ; CHECK-NEXT: [[LD1:%[0-9]+]]:gpr = LD [[LD]], 0
-    ; CHECK-NEXT: [[LD2:%[0-9]+]]:gpr = LD [[LD]], 8
-    ; CHECK-NEXT: [[LD3:%[0-9]+]]:gpr = LD [[LD]], 16
-    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI killed [[LD3]], 64
-    ; CHECK-NEXT: [[LW:%[0-9]+]]:gpr = LW [[LD1]], 4
-    ; CHECK-NEXT: [[LW1:%[0-9]+]]:gpr = LW [[LD1]], 0
-    ; CHECK-NEXT: [[LWU:%[0-9]+]]:gpr = LWU [[LD1]], 12
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:gpr = XOR [[LW]], [[LW1]]
-    ; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 113938
-    ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI killed [[LUI]], -1062
-    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:gpr = XOR killed [[XOR]], killed [[ADDI1]]
-    ; CHECK-NEXT: [[LW2:%[0-9]+]]:gpr = LW [[LD1]], 8
-    ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI killed [[LW2]], 32
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:gpr = OR killed [[SLLI]], killed [[LWU]]
-    ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 $noreg, 4, 6 /* e64 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 $noreg, killed [[PseudoVID_V_M1_]], killed [[OR]], 4, 6 /* e64 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVNSRL_WI_MF2_:%[0-9]+]]:vr = PseudoVNSRL_WI_MF2 $noreg, [[PseudoVADD_VX_M1_]], 0, 4, 5 /* e32 */, 3 /* ta, ma */
-    ; CHECK-NEXT: [[ADDI2:%[0-9]+]]:gpr = ADDI $x0, 32
-    ; CHECK-NEXT: [[PseudoVNSRL_WX_MF2_:%[0-9]+]]:vr = PseudoVNSRL_WX_MF2 $noreg, [[PseudoVADD_VX_M1_]], killed [[ADDI2]], 4, 5 /* e32 */, 3 /* ta, ma */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVNSRL_WI_MF2_]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_1:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVNSRL_WX_MF2_]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_]], [[PseudoVADD_VX_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VX_MF2_1]], 19, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VX_MF2_1]], 13, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_]], killed [[PseudoVSRL_VI_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_]], killed [[PseudoVOR_VV_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_1:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_]], [[PseudoVADD_VV_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_1:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_1:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_1:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_1]], killed [[PseudoVSRL_VI_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_1:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_1]], killed [[PseudoVOR_VV_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_2:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_1]], [[PseudoVADD_VV_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_2:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_1]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_2:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_1]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_2:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_2]], killed [[PseudoVSRL_VI_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_2:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_2]], killed [[PseudoVOR_VV_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_3:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_2]], [[PseudoVADD_VV_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_3:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_2]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_3:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_2]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_3:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_3]], killed [[PseudoVSRL_VI_MF2_3]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_3:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_3]], killed [[PseudoVOR_VV_MF2_3]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_2:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_3]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_3:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_3]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_3]], 1, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_4:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_2]], [[PseudoVADD_VI_MF2_]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_4:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_4:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_4:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_4]], killed [[PseudoVSRL_VI_MF2_4]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_4:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_4]], killed [[PseudoVOR_VV_MF2_4]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_5:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_4]], [[PseudoVADD_VV_MF2_4]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_5:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_4]], 3, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_5:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_4]], 29, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_5:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_5]], killed [[PseudoVSRL_VI_MF2_5]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_5:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_5]], killed [[PseudoVOR_VV_MF2_5]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_6:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_5]], [[PseudoVADD_VV_MF2_5]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_6:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_5]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_6:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_5]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_6:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_6]], killed [[PseudoVSRL_VI_MF2_6]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_6:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_6]], killed [[PseudoVOR_VV_MF2_6]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_7:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_6]], [[PseudoVADD_VV_MF2_6]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_7:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_6]], 8, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_7:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_6]], 24, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_7:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_7]], killed [[PseudoVSRL_VI_MF2_7]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_7:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_7]], killed [[PseudoVOR_VV_MF2_7]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_4:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_7]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_5:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_7]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_1:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_5]], 2, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_8:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_4]], [[PseudoVADD_VI_MF2_1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_8:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_1]], 19, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_8:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_1]], 13, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_8:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_8]], killed [[PseudoVSRL_VI_MF2_8]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_8:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_8]], killed [[PseudoVOR_VV_MF2_8]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_9:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_8]], [[PseudoVADD_VV_MF2_8]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_9:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_8]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_9:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_8]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_9:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_9]], killed [[PseudoVSRL_VI_MF2_9]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_9:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_9]], killed [[PseudoVOR_VV_MF2_9]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_10:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_9]], [[PseudoVADD_VV_MF2_9]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_10:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_9]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_10:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_9]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_10:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_10]], killed [[PseudoVSRL_VI_MF2_10]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_10:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_10]], killed [[PseudoVOR_VV_MF2_10]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_11:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_10]], [[PseudoVADD_VV_MF2_10]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_11:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_10]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_11:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_10]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_11:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_11]], killed [[PseudoVSRL_VI_MF2_11]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_11:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_11]], killed [[PseudoVOR_VV_MF2_11]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_6:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_11]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_7:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_11]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_2:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_7]], 3, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_12:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_6]], [[PseudoVADD_VI_MF2_2]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_12:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_2]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_12:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_2]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_12:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_12]], killed [[PseudoVSRL_VI_MF2_12]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_12:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_12]], killed [[PseudoVOR_VV_MF2_12]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_13:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_12]], [[PseudoVADD_VV_MF2_12]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_13:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_12]], 3, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_13:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_12]], 29, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_13:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_13]], killed [[PseudoVSRL_VI_MF2_13]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_13:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_13]], killed [[PseudoVOR_VV_MF2_13]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_14:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_13]], [[PseudoVADD_VV_MF2_13]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_14:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_13]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_14:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_13]], 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_14:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_14]], killed [[PseudoVSRL_VI_MF2_14]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_14:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_14]], killed [[PseudoVOR_VV_MF2_14]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_15:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_14]], [[PseudoVADD_VV_MF2_14]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_15:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_14]], 8, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_15:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_14]], 24, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_15:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_15]], killed [[PseudoVSRL_VI_MF2_15]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_15:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_15]], killed [[PseudoVOR_VV_MF2_15]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_8:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_15]], [[LW]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_9:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_15]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_3:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_9]], 4, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_16:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, killed [[PseudoVADD_VX_MF2_8]], [[PseudoVADD_VI_MF2_3]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_16:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_3]], 19, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_16:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVADD_VI_MF2_3]], 13, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_16:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_16]], killed [[PseudoVSRL_VI_MF2_16]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_16:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_16]], killed [[PseudoVOR_VV_MF2_16]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_17:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_16]], [[PseudoVADD_VV_MF2_16]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_17:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_16]], 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_17:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_16]], 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_17:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_17]], killed [[PseudoVSRL_VI_MF2_17]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_17:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_17]], killed [[PseudoVOR_VV_MF2_17]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_18:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_17]], [[PseudoVADD_VV_MF2_17]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_18:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_17]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_18:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_17]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_18:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_18]], killed [[PseudoVSRL_VI_MF2_18]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_18:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_18]], killed [[PseudoVOR_VV_MF2_18]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VV_MF2_19:%[0-9]+]]:vr = PseudoVADD_VV_MF2 $noreg, [[PseudoVXOR_VV_MF2_18]], [[PseudoVADD_VV_MF2_18]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSRL_VI_MF2_19:%[0-9]+]]:vr = PseudoVSRL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_18]], 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVSLL_VI_MF2_19:%[0-9]+]]:vr = PseudoVSLL_VI_MF2 $noreg, [[PseudoVXOR_VV_MF2_18]], 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVOR_VV_MF2_19:%[0-9]+]]:vr = PseudoVOR_VV_MF2 $noreg, killed [[PseudoVSLL_VI_MF2_19]], killed [[PseudoVSRL_VI_MF2_19]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVXOR_VV_MF2_19:%[0-9]+]]:vr = PseudoVXOR_VV_MF2 $noreg, [[PseudoVADD_VV_MF2_19]], killed [[PseudoVOR_VV_MF2_19]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_10:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, [[PseudoVADD_VV_MF2_19]], [[XOR1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VX_MF2_11:%[0-9]+]]:vr = PseudoVADD_VX_MF2 $noreg, killed [[PseudoVXOR_VV_MF2_19]], [[LW1]], 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: [[PseudoVADD_VI_MF2_4:%[0-9]+]]:vr = PseudoVADD_VI_MF2 $noreg, killed [[PseudoVADD_VX_MF2_11]], 5, 4, 5 /* e32 */, 1 /* ta, mu */
-    ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_MF2_10]], killed [[LD2]], 4, 5 /* e32 */
-    ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VI_MF2_4]], killed [[ADDI]], 4, 5 /* e32 */
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
-    ; CHECK-NEXT: $x10 = COPY [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %1:gpr = COPY $x11
-    %3:gpr = LD %1, 32
-    %4:gpr = LD %3, 0
-    %5:gpr = LD %3, 8
-    %6:gpr = LD %3, 16
-    %7:gpr = ADDI killed %6, 64
-    %8:gpr = LW %4, 4
-    %9:gpr = LW %4, 0
-    %10:gpr = LWU %4, 12
-    %11:gpr = XOR %8, %9
-    %12:gpr = LUI 113938
-    %13:gpr = ADDI killed %12, -1062
-    %14:gpr = XOR killed %11, killed %13
-    %15:gpr = LW %4, 8
-    %16:gpr = SLLI killed %15, 32
-    %17:gpr = OR killed %16, killed %10
-    %18:vr = PseudoVID_V_M1 $noreg, 4, 6 /* e64 */, 1 /* ta, mu */
-    %19:vr = PseudoVADD_VX_M1 $noreg, killed %18, killed %17, 4, 6 /* e64 */, 1 /* ta, mu */
-    %20:vr = PseudoVNSRL_WI_MF2 $noreg, %19, 0, 4, 5 /* e32 */, 3 /* ta, ma */
-    %21:gpr = ADDI $x0, 32
-    %22:vr = PseudoVNSRL_WX_MF2 $noreg, %19, killed %21, 4, 5 /* e32 */, 3 /* ta, ma */
-    %23:vr = PseudoVADD_VX_MF2 $noreg, killed %20, %9, 4, 5 /* e32 */, 1 /* ta, mu */
-    %24:vr = PseudoVADD_VX_MF2 $noreg, killed %22, %8, 4, 5 /* e32 */, 1 /* ta, mu */
-    %25:vr = PseudoVADD_VV_MF2 $noreg, killed %23, %24, 4, 5 /* e32 */, 1 /* ta, mu */
-    %26:vr = PseudoVSRL_VI_MF2 $noreg, %24, 19, 4, 5 /* e32 */, 1 /* ta, mu */
-    %27:vr = PseudoVSLL_VI_MF2 $noreg, %24, 13, 4, 5 /* e32 */, 1 /* ta, mu */
-    %28:vr = PseudoVOR_VV_MF2 $noreg, killed %27, killed %26, 4, 5 /* e32 */, 1 /* ta, mu */
-    %29:vr = PseudoVXOR_VV_MF2 $noreg, %25, killed %28, 4, 5 /* e32 */, 1 /* ta, mu */
-    %30:vr = PseudoVADD_VV_MF2 $noreg, %29, %25, 4, 5 /* e32 */, 1 /* ta, mu */
-    %31:vr = PseudoVSRL_VI_MF2 $noreg, %29, 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    %32:vr = PseudoVSLL_VI_MF2 $noreg, %29, 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    %33:vr = PseudoVOR_VV_MF2 $noreg, killed %32, killed %31, 4, 5 /* e32 */, 1 /* ta, mu */
-    %34:vr = PseudoVXOR_VV_MF2 $noreg, %30, killed %33, 4, 5 /* e32 */, 1 /* ta, mu */
-    %35:vr = PseudoVADD_VV_MF2 $noreg, %34, %30, 4, 5 /* e32 */, 1 /* ta, mu */
-    %36:vr = PseudoVSRL_VI_MF2 $noreg, %34, 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    %37:vr = PseudoVSLL_VI_MF2 $noreg, %34, 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    %38:vr = PseudoVOR_VV_MF2 $noreg, killed %37, killed %36, 4, 5 /* e32 */, 1 /* ta, mu */
-    %39:vr = PseudoVXOR_VV_MF2 $noreg, %35, killed %38, 4, 5 /* e32 */, 1 /* ta, mu */
-    %40:vr = PseudoVADD_VV_MF2 $noreg, %39, %35, 4, 5 /* e32 */, 1 /* ta, mu */
-    %41:vr = PseudoVSRL_VI_MF2 $noreg, %39, 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    %42:vr = PseudoVSLL_VI_MF2 $noreg, %39, 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    %43:vr = PseudoVOR_VV_MF2 $noreg, killed %42, killed %41, 4, 5 /* e32 */, 1 /* ta, mu */
-    %44:vr = PseudoVXOR_VV_MF2 $noreg, %40, killed %43, 4, 5 /* e32 */, 1 /* ta, mu */
-    %45:vr = PseudoVADD_VX_MF2 $noreg, %40, %8, 4, 5 /* e32 */, 1 /* ta, mu */
-    %46:vr = PseudoVADD_VX_MF2 $noreg, killed %44, %14, 4, 5 /* e32 */, 1 /* ta, mu */
-    %47:vr = PseudoVADD_VI_MF2 $noreg, killed %46, 1, 4, 5 /* e32 */, 1 /* ta, mu */
-    %48:vr = PseudoVADD_VV_MF2 $noreg, killed %45, %47, 4, 5 /* e32 */, 1 /* ta, mu */
-    %49:vr = PseudoVSRL_VI_MF2 $noreg, %47, 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    %50:vr = PseudoVSLL_VI_MF2 $noreg, %47, 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    %51:vr = PseudoVOR_VV_MF2 $noreg, killed %50, killed %49, 4, 5 /* e32 */, 1 /* ta, mu */
-    %52:vr = PseudoVXOR_VV_MF2 $noreg, %48, killed %51, 4, 5 /* e32 */, 1 /* ta, mu */
-    %53:vr = PseudoVADD_VV_MF2 $noreg, %52, %48, 4, 5 /* e32 */, 1 /* ta, mu */
-    %54:vr = PseudoVSRL_VI_MF2 $noreg, %52, 3, 4, 5 /* e32 */, 1 /* ta, mu */
-    %55:vr = PseudoVSLL_VI_MF2 $noreg, %52, 29, 4, 5 /* e32 */, 1 /* ta, mu */
-    %56:vr = PseudoVOR_VV_MF2 $noreg, killed %55, killed %54, 4, 5 /* e32 */, 1 /* ta, mu */
-    %57:vr = PseudoVXOR_VV_MF2 $noreg, %53, killed %56, 4, 5 /* e32 */, 1 /* ta, mu */
-    %58:vr = PseudoVADD_VV_MF2 $noreg, %57, %53, 4, 5 /* e32 */, 1 /* ta, mu */
-    %59:vr = PseudoVSRL_VI_MF2 $noreg, %57, 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    %60:vr = PseudoVSLL_VI_MF2 $noreg, %57, 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    %61:vr = PseudoVOR_VV_MF2 $noreg, killed %60, killed %59, 4, 5 /* e32 */, 1 /* ta, mu */
-    %62:vr = PseudoVXOR_VV_MF2 $noreg, %58, killed %61, 4, 5 /* e32 */, 1 /* ta, mu */
-    %63:vr = PseudoVADD_VV_MF2 $noreg, %62, %58, 4, 5 /* e32 */, 1 /* ta, mu */
-    %64:vr = PseudoVSRL_VI_MF2 $noreg, %62, 8, 4, 5 /* e32 */, 1 /* ta, mu */
-    %65:vr = PseudoVSLL_VI_MF2 $noreg, %62, 24, 4, 5 /* e32 */, 1 /* ta, mu */
-    %66:vr = PseudoVOR_VV_MF2 $noreg, killed %65, killed %64, 4, 5 /* e32 */, 1 /* ta, mu */
-    %67:vr = PseudoVXOR_VV_MF2 $noreg, %63, killed %66, 4, 5 /* e32 */, 1 /* ta, mu */
-    %68:vr = PseudoVADD_VX_MF2 $noreg, %63, %14, 4, 5 /* e32 */, 1 /* ta, mu */
-    %69:vr = PseudoVADD_VX_MF2 $noreg, killed %67, %9, 4, 5 /* e32 */, 1 /* ta, mu */
-    %70:vr = PseudoVADD_VI_MF2 $noreg, killed %69, 2, 4, 5 /* e32 */, 1 /* ta, mu */
-    %71:vr = PseudoVADD_VV_MF2 $noreg, killed %68, %70, 4, 5 /* e32 */, 1 /* ta, mu */
-    %72:vr = PseudoVSRL_VI_MF2 $noreg, %70, 19, 4, 5 /* e32 */, 1 /* ta, mu */
-    %73:vr = PseudoVSLL_VI_MF2 $noreg, %70, 13, 4, 5 /* e32 */, 1 /* ta, mu */
-    %74:vr = PseudoVOR_VV_MF2 $noreg, killed %73, killed %72, 4, 5 /* e32 */, 1 /* ta, mu */
-    %75:vr = PseudoVXOR_VV_MF2 $noreg, %71, killed %74, 4, 5 /* e32 */, 1 /* ta, mu */
-    %76:vr = PseudoVADD_VV_MF2 $noreg, %75, %71, 4, 5 /* e32 */, 1 /* ta, mu */
-    %77:vr = PseudoVSRL_VI_MF2 $noreg, %75, 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    %78:vr = PseudoVSLL_VI_MF2 $noreg, %75, 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    %79:vr = PseudoVOR_VV_MF2 $noreg, killed %78, killed %77, 4, 5 /* e32 */, 1 /* ta, mu */
-    %80:vr = PseudoVXOR_VV_MF2 $noreg, %76, killed %79, 4, 5 /* e32 */, 1 /* ta, mu */
-    %81:vr = PseudoVADD_VV_MF2 $noreg, %80, %76, 4, 5 /* e32 */, 1 /* ta, mu */
-    %82:vr = PseudoVSRL_VI_MF2 $noreg, %80, 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    %83:vr = PseudoVSLL_VI_MF2 $noreg, %80, 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    %84:vr = PseudoVOR_VV_MF2 $noreg, killed %83, killed %82, 4, 5 /* e32 */, 1 /* ta, mu */
-    %85:vr = PseudoVXOR_VV_MF2 $noreg, %81, killed %84, 4, 5 /* e32 */, 1 /* ta, mu */
-    %86:vr = PseudoVADD_VV_MF2 $noreg, %85, %81, 4, 5 /* e32 */, 1 /* ta, mu */
-    %87:vr = PseudoVSRL_VI_MF2 $noreg, %85, 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    %88:vr = PseudoVSLL_VI_MF2 $noreg, %85, 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    %89:vr = PseudoVOR_VV_MF2 $noreg, killed %88, killed %87, 4, 5 /* e32 */, 1 /* ta, mu */
-    %90:vr = PseudoVXOR_VV_MF2 $noreg, %86, killed %89, 4, 5 /* e32 */, 1 /* ta, mu */
-    %91:vr = PseudoVADD_VX_MF2 $noreg, %86, %9, 4, 5 /* e32 */, 1 /* ta, mu */
-    %92:vr = PseudoVADD_VX_MF2 $noreg, killed %90, %8, 4, 5 /* e32 */, 1 /* ta, mu */
-    %93:vr = PseudoVADD_VI_MF2 $noreg, killed %92, 3, 4, 5 /* e32 */, 1 /* ta, mu */
-    %94:vr = PseudoVADD_VV_MF2 $noreg, killed %91, %93, 4, 5 /* e32 */, 1 /* ta, mu */
-    %95:vr = PseudoVSRL_VI_MF2 $noreg, %93, 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    %96:vr = PseudoVSLL_VI_MF2 $noreg, %93, 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    %97:vr = PseudoVOR_VV_MF2 $noreg, killed %96, killed %95, 4, 5 /* e32 */, 1 /* ta, mu */
-    %98:vr = PseudoVXOR_VV_MF2 $noreg, %94, killed %97, 4, 5 /* e32 */, 1 /* ta, mu */
-    %99:vr = PseudoVADD_VV_MF2 $noreg, %98, %94, 4, 5 /* e32 */, 1 /* ta, mu */
-    %100:vr = PseudoVSRL_VI_MF2 $noreg, %98, 3, 4, 5 /* e32 */, 1 /* ta, mu */
-    %101:vr = PseudoVSLL_VI_MF2 $noreg, %98, 29, 4, 5 /* e32 */, 1 /* ta, mu */
-    %102:vr = PseudoVOR_VV_MF2 $noreg, killed %101, killed %100, 4, 5 /* e32 */, 1 /* ta, mu */
-    %103:vr = PseudoVXOR_VV_MF2 $noreg, %99, killed %102, 4, 5 /* e32 */, 1 /* ta, mu */
-    %104:vr = PseudoVADD_VV_MF2 $noreg, %103, %99, 4, 5 /* e32 */, 1 /* ta, mu */
-    %105:vr = PseudoVSRL_VI_MF2 $noreg, %103, 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    %106:vr = PseudoVSLL_VI_MF2 $noreg, %103, 16, 4, 5 /* e32 */, 1 /* ta, mu */
-    %107:vr = PseudoVOR_VV_MF2 $noreg, killed %106, killed %105, 4, 5 /* e32 */, 1 /* ta, mu */
-    %108:vr = PseudoVXOR_VV_MF2 $noreg, %104, killed %107, 4, 5 /* e32 */, 1 /* ta, mu */
-    %109:vr = PseudoVADD_VV_MF2 $noreg, %108, %104, 4, 5 /* e32 */, 1 /* ta, mu */
-    %110:vr = PseudoVSRL_VI_MF2 $noreg, %108, 8, 4, 5 /* e32 */, 1 /* ta, mu */
-    %111:vr = PseudoVSLL_VI_MF2 $noreg, %108, 24, 4, 5 /* e32 */, 1 /* ta, mu */
-    %112:vr = PseudoVOR_VV_MF2 $noreg, killed %111, killed %110, 4, 5 /* e32 */, 1 /* ta, mu */
-    %113:vr = PseudoVXOR_VV_MF2 $noreg, %109, killed %112, 4, 5 /* e32 */, 1 /* ta, mu */
-    %114:vr = PseudoVADD_VX_MF2 $noreg, %109, %8, 4, 5 /* e32 */, 1 /* ta, mu */
-    %115:vr = PseudoVADD_VX_MF2 $noreg, killed %113, %14, 4, 5 /* e32 */, 1 /* ta, mu */
-    %116:vr = PseudoVADD_VI_MF2 $noreg, killed %115, 4, 4, 5 /* e32 */, 1 /* ta, mu */
-    %117:vr = PseudoVADD_VV_MF2 $noreg, killed %114, %116, 4, 5 /* e32 */, 1 /* ta, mu */
-    %118:vr = PseudoVSRL_VI_MF2 $noreg, %116, 19, 4, 5 /* e32 */, 1 /* ta, mu */
-    %119:vr = PseudoVSLL_VI_MF2 $noreg, %116, 13, 4, 5 /* e32 */, 1 /* ta, mu */
-    %120:vr = PseudoVOR_VV_MF2 $noreg, killed %119, killed %118, 4, 5 /* e32 */, 1 /* ta, mu */
-    %121:vr = PseudoVXOR_VV_MF2 $noreg, %117, killed %120, 4, 5 /* e32 */, 1 /* ta, mu */
-    %122:vr = PseudoVADD_VV_MF2 $noreg, %121, %117, 4, 5 /* e32 */, 1 /* ta, mu */
-    %123:vr = PseudoVSRL_VI_MF2 $noreg, %121, 17, 4, 5 /* e32 */, 1 /* ta, mu */
-    %124:vr = PseudoVSLL_VI_MF2 $noreg, %121, 15, 4, 5 /* e32 */, 1 /* ta, mu */
-    %125:vr = PseudoVOR_VV_MF2 $noreg, killed %124, killed %123, 4, 5 /* e32 */, 1 /* ta, mu */
-    %126:vr = PseudoVXOR_VV_MF2 $noreg, %122, killed %125, 4, 5 /* e32 */, 1 /* ta, mu */
-    %127:vr = PseudoVADD_VV_MF2 $noreg, %126, %122, 4, 5 /* e32 */, 1 /* ta, mu */
-    %128:vr = PseudoVSRL_VI_MF2 $noreg, %126, 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    %129:vr = PseudoVSLL_VI_MF2 $noreg, %126, 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    %130:vr = PseudoVOR_VV_MF2 $noreg, killed %129, killed %128, 4, 5 /* e32 */, 1 /* ta, mu */
-    %131:vr = PseudoVXOR_VV_MF2 $noreg, %127, killed %130, 4, 5 /* e32 */, 1 /* ta, mu */
-    %132:vr = PseudoVADD_VV_MF2 $noreg, %131, %127, 4, 5 /* e32 */, 1 /* ta, mu */
-    %133:vr = PseudoVSRL_VI_MF2 $noreg, %131, 26, 4, 5 /* e32 */, 1 /* ta, mu */
-    %134:vr = PseudoVSLL_VI_MF2 $noreg, %131, 6, 4, 5 /* e32 */, 1 /* ta, mu */
-    %135:vr = PseudoVOR_VV_MF2 $noreg, killed %134, killed %133, 4, 5 /* e32 */, 1 /* ta, mu */
-    %136:vr = PseudoVXOR_VV_MF2 $noreg, %132, killed %135, 4, 5 /* e32 */, 1 /* ta, mu */
-    %137:vr = PseudoVADD_VX_MF2 $noreg, %132, %14, 4, 5 /* e32 */, 1 /* ta, mu */
-    %138:vr = PseudoVADD_VX_MF2 $noreg, killed %136, %9, 4, 5 /* e32 */, 1 /* ta, mu */
-    %139:vr = PseudoVADD_VI_MF2 $noreg, killed %138, 5, 4, 5 /* e32 */, 1 /* ta, mu */
-    PseudoVSE32_V_MF2 killed %137, killed %5, 4, 5 /* e32 */
-    PseudoVSE32_V_MF2 killed %139, killed %7, 4, 5 /* e32 */
-    %140:gpr = COPY $x0
-    $x10 = COPY %140
-    PseudoRET implicit $x10
-...

>From b48d9183db30b8ad15f6f02312d1188e0b275a48 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 22 Jan 2025 10:13:26 -0800
Subject: [PATCH 6/7] fixup! add comment to test

---
 llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll b/llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll
index 7b948593abf74d..65e6eddfb3cd60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlopt-same-vl.ll
@@ -3,6 +3,13 @@
 
 ; REQUIRES: asserts
 
+; GitHub Issue #123862 provided a case where the riscv-vl-optimizer pass was
+; very slow. It was found that that case benefited greatly from aborting due
+; to CommonVL == VLOp. Adding the case provided in the issue would show up
+; as a long-running test instead of a test failure. We would likely have a hard
+; time figuring out if that case had a regression. So instead, we check this output
+; which was responsible for speeding it up.
+
 define <vscale x 4 x i32> @same_vl_imm(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
   ; CHECK: User VL is: 4
   ; CHECK-NEXT: Abort due to CommonVL == VLOp, no point in reducing.

>From 3e458739f761ec70fd632214595f31c2443f96e4 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 22 Jan 2025 10:47:01 -0800
Subject: [PATCH 7/7] fixup! rebase

---
 llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 56 ++++------------------
 1 file changed, 8 insertions(+), 48 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 6f783ed15e5885..fc3300247b1909 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1308,55 +1308,15 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI) {
   unsigned VLOpNum = RISCVII::getVLOpNum(MI.getDesc());
   MachineOperand &VLOp = MI.getOperand(VLOpNum);
 
-    unsigned VLOpNum = RISCVII::getVLOpNum(MI.getDesc());
-    MachineOperand &VLOp = MI.getOperand(VLOpNum);
-
-    if (!RISCV::isVLKnownLE(*CommonVL, VLOp)) {
-      LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not <= VLOp.\n");
-      continue;
-    }
-
-    if (CommonVL->isIdenticalTo(VLOp)) {
-      LLVM_DEBUG(
-          dbgs()
-          << "    Abort due to CommonVL == VLOp, no point in reducing.\n");
-      continue;
-    }
-
-    if (CommonVL->isImm()) {
-      LLVM_DEBUG(dbgs() << "  Reduce VL from " << VLOp << " to "
-                        << CommonVL->getImm() << " for " << MI << "\n");
-      VLOp.ChangeToImmediate(CommonVL->getImm());
-    } else {
-      const MachineInstr *VLMI = MRI->getVRegDef(CommonVL->getReg());
-      if (!MDT->dominates(VLMI, &MI))
-        continue;
-      LLVM_DEBUG(
-          dbgs() << "  Reduce VL from " << VLOp << " to "
-                 << printReg(CommonVL->getReg(), MRI->getTargetRegisterInfo())
-                 << " for " << MI << "\n");
-
-      // All our checks passed. We can reduce VL.
-      VLOp.ChangeToRegister(CommonVL->getReg(), false);
-    }
-
-    MadeChange = true;
-
-    // Now add all inputs to this instruction to the worklist.
-    for (auto &Op : MI.operands()) {
-      if (!Op.isReg() || !Op.isUse() || !Op.getReg().isVirtual())
-        continue;
-
-      if (!isVectorRegClass(Op.getReg(), MRI))
-        continue;
-
-      MachineInstr *DefMI = MRI->getVRegDef(Op.getReg());
-
-      if (!isCandidate(*DefMI))
-        continue;
+  if (!RISCV::isVLKnownLE(*CommonVL, VLOp)) {
+    LLVM_DEBUG(dbgs() << "    Abort due to CommonVL not <= VLOp.\n");
+    return false;
+  }
 
-      Worklist.insert(DefMI);
-    }
+  if (CommonVL->isIdenticalTo(VLOp)) {
+    LLVM_DEBUG(
+        dbgs() << "    Abort due to CommonVL == VLOp, no point in reducing.\n");
+    return false;
   }
 
   if (CommonVL->isImm()) {



More information about the llvm-commits mailing list