[llvm] [RISCV] Remove SEW operand for load/store and SEW-aware pseudos (PR #90396)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 28 21:02:24 PDT 2024


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/90396

From 80b815ef708b32a99a0b986f012b96eed9dd729c Mon Sep 17 00:00:00 2001
From: wangpc <wangpengcheng.pp at bytedance.com>
Date: Sun, 28 Apr 2024 20:22:13 +0800
Subject: [PATCH 1/2] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Created using spr 1.3.6-beta.1
---
 .../llvm/TargetParser/RISCVTargetParser.h     |   7 +
 .../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h |  56 +-
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   |  35 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   1 -
 llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp  |  20 +-
 llvm/lib/Target/RISCV/RISCVInstrFormats.td    |   7 +
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp      |  22 +-
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    | 871 ++++++++++++------
 .../Target/RISCV/RISCVInstrInfoVSDPatterns.td | 265 +++---
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 286 +++---
 llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp     |   4 +-
 .../RISCV/rvv/addi-scalable-offset.mir        |   4 +-
 llvm/test/CodeGen/RISCV/rvv/copyprop.mir      |   2 +-
 .../RISCV/rvv/debug-info-rvv-dbg-value.mir    |   4 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-fmf.ll    |   2 +-
 .../test/CodeGen/RISCV/rvv/frameindex-addr.ll |   2 +-
 .../CodeGen/RISCV/rvv/implicit-def-copy.ll    |   2 +-
 .../RISCV/rvv/pass-fast-math-flags-sdnode.ll  |   2 +-
 .../test/CodeGen/RISCV/rvv/reg-coalescing.mir |  12 +-
 .../RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll |   4 +-
 .../rvv/strided-vpload-vpstore-output.ll      |   4 +-
 .../rvv/subregister-undef-early-clobber.mir   | 160 ++--
 .../RISCV/rvv/tail-agnostic-impdef-copy.mir   |   5 +-
 .../RISCV/rvv/vleff-vlseg2ff-output.ll        |  12 +-
 llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir      |  84 +-
 .../RISCV/rvv/vsetvli-insert-crossbb.mir      |  56 +-
 .../test/CodeGen/RISCV/rvv/vsetvli-insert.mir |  35 +-
 .../rvv/wrong-stack-offset-for-rvv-object.mir |   4 +-
 llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir |   4 +-
 29 files changed, 1167 insertions(+), 805 deletions(-)

diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
index cdd19189f8dc7d..d80ad7b6e9a8c3 100644
--- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -51,6 +51,13 @@ enum VLMUL : uint8_t {
   LMUL_F2
 };
 
+enum VSEW : uint8_t {
+  SEW_8 = 0,
+  SEW_16,
+  SEW_32,
+  SEW_64,
+};
+
 enum {
   TAIL_UNDISTURBED_MASK_UNDISTURBED = 0,
   TAIL_AGNOSTIC = 1,
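
For reference: the two-bit VSEW field stores log2(SEW/8), i.e. SEW = 8 * 2^VSEW
and Log2SEW = 3 + VSEW. A minimal standalone sketch of that round trip (the
helper names are hypothetical, not part of this patch):

#include <cassert>
#include <cstdint>

enum VSEW : uint8_t { SEW_8 = 0, SEW_16, SEW_32, SEW_64 };

constexpr unsigned sewFromVSEW(VSEW V) { return 8u << V; }     // 8 * 2^VSEW
constexpr unsigned log2SEWFromVSEW(VSEW V) { return 3u + V; }  // log2(8) == 3

int main() {
  assert(sewFromVSEW(SEW_32) == 32);
  assert(log2SEWFromVSEW(SEW_64) == 6);
  return 0;
}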
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 08f056f78979af..cb5ab1e3a42911 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -18,10 +18,13 @@
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/TargetParser/RISCVISAInfo.h"
 #include "llvm/TargetParser/RISCVTargetParser.h"
 #include "llvm/TargetParser/SubtargetFeature.h"
+#include <cstdint>
 
 namespace llvm {
 
@@ -123,6 +126,12 @@ enum {
   // 3 -> widening case
   TargetOverlapConstraintTypeShift = UsesVXRMShift + 1,
   TargetOverlapConstraintTypeMask = 3ULL << TargetOverlapConstraintTypeShift,
+
+  HasImplictSEWShift = TargetOverlapConstraintTypeShift + 2,
+  HasImplictSEWMask = 1 << HasImplictSEWShift,
+
+  VSEWShift = HasImplictSEWShift + 1,
+  VSEWMask = 0b11 << VSEWShift,
 };
 
 // Helper functions to read TSFlags.
@@ -171,14 +180,29 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) {
 /// \returns true if this instruction uses vxrm
 static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; }
 
+/// \returns true if this instruction has an implicit SEW value.
+static inline bool hasImplictSEW(uint64_t TSFlags) {
+  return TSFlags & HasImplictSEWMask;
+}
+
+/// \returns the VSEW for the instruction.
+static inline VSEW getVSEW(uint64_t TSFlags) {
+  return static_cast<VSEW>((TSFlags & VSEWMask) >> VSEWShift);
+}
+
+/// \returns true if there is a SEW value for the instruction.
+static inline bool hasSEW(uint64_t TSFlags) {
+  return hasSEWOp(TSFlags) || hasImplictSEW(TSFlags);
+}
+
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
   const uint64_t TSFlags = Desc.TSFlags;
-  // This method is only called if we expect to have a VL operand, and all
-  // instructions with VL also have SEW.
-  assert(hasSEWOp(TSFlags) && hasVLOp(TSFlags));
-  unsigned Offset = 2;
+  // This method is only called if we expect to have a VL operand.
+  assert(hasVLOp(TSFlags));
+  // Some instructions don't have a SEW operand.
+  unsigned Offset = 1 + hasSEWOp(TSFlags);
   if (hasVecPolicyOp(TSFlags))
-    Offset = 3;
+    Offset = Offset + 1;
   return Desc.getNumOperands() - Offset;
 }
 
@@ -191,6 +215,28 @@ static inline unsigned getSEWOpNum(const MCInstrDesc &Desc) {
   return Desc.getNumOperands() - Offset;
 }
 
+static inline unsigned getLog2SEW(uint64_t TSFlags) {
+  return 3 + RISCVII::getVSEW(TSFlags);
+}
+
+static inline MachineOperand getSEWOp(const MachineInstr &MI) {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  assert(hasSEW(TSFlags) && "The instruction doesn't have a SEW value!");
+  if (hasSEWOp(TSFlags))
+    return MI.getOperand(getSEWOpNum(MI.getDesc()));
+
+  return MachineOperand::CreateImm(getLog2SEW(TSFlags));
+}
+
+static inline unsigned getLog2SEW(const MachineInstr &MI) {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  assert(RISCVII::hasSEW(TSFlags) && "The instruction doesn't have a SEW value!");
+  if (RISCVII::hasSEWOp(TSFlags))
+    return MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
+
+  return getLog2SEW(TSFlags);
+}
+
 static inline unsigned getVecPolicyOpNum(const MCInstrDesc &Desc) {
   assert(hasVecPolicyOp(Desc.TSFlags));
   return Desc.getNumOperands() - 1;
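
As a standalone illustration of the trailing-operand layout getVLOpNum now
assumes, operand lists end in [..., VL, SEW?, Policy?], with each index
counted back from the end. A hypothetical mirror (not patch code):

#include <cassert>

unsigned getVLOpIdx(unsigned NumOperands, bool HasSEWOp, bool HasPolicyOp) {
  // 1 slot for VL itself, plus one each for the optional SEW and policy
  // operands that follow it.
  unsigned Offset = 1 + (HasSEWOp ? 1 : 0) + (HasPolicyOp ? 1 : 0);
  return NumOperands - Offset;
}

int main() {
  // A unit-stride load after this patch (rd, dest, rs1, VL, policy):
  assert(getVLOpIdx(5, /*HasSEWOp=*/false, /*HasPolicyOp=*/true) == 3);
  // A pseudo that keeps its SEW operand (rd, dest, rs1, VL, SEW, policy):
  assert(getVLOpIdx(6, /*HasSEWOp=*/true, /*HasPolicyOp=*/true) == 3);
  return 0;
}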
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index b0568297a470a7..d5db2717a721d3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -317,8 +317,11 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
   Operands.push_back(VL);
 
   MVT XLenVT = Subtarget->getXLenVT();
-  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
-  Operands.push_back(SEWOp);
+  // Only add the SEW operand for indexed or mask load/store instructions.
+  if (Log2SEW == 0 || IndexVT) {
+    SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
+    Operands.push_back(SEWOp);
+  }
 
   // At the IR layer, all the masked load intrinsics have policy operands,
   // none of the others do.  All have passthru operands.  For our pseudos,
@@ -2226,7 +2229,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       selectVLOp(Node->getOperand(2), VL);
 
     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
-    SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
 
     // If VL=1, then we don't need to do a strided load and can just do a
     // regular load.
@@ -2243,7 +2245,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       Operands.push_back(CurDAG->getRegister(RISCV::X0, XLenVT));
     uint64_t Policy = RISCVII::MASK_AGNOSTIC | RISCVII::TAIL_AGNOSTIC;
     SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
-    Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
+    Operands.append({VL, PolicyOp, Ld->getChain()});
 
     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
@@ -2970,7 +2972,7 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo,
 
   const MCInstrDesc &MCID = TII->get(User->getMachineOpcode());
   const uint64_t TSFlags = MCID.TSFlags;
-  if (!RISCVII::hasSEWOp(TSFlags))
+  if (!RISCVII::hasSEW(TSFlags))
     return false;
   assert(RISCVII::hasVLOp(TSFlags));
 
@@ -2980,7 +2982,9 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo,
   bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TSFlags);
   unsigned VLIdx =
       User->getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
-  const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
+  const unsigned Log2SEW = RISCVII::hasSEWOp(TSFlags)
+                               ? User->getConstantOperandVal(VLIdx + 1)
+                               : RISCVII::getLog2SEW(TSFlags);
 
   if (UserOpNo == VLIdx)
     return false;
@@ -3696,12 +3700,18 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
       return false;
   }
 
+  SDLoc DL(N);
+
   // The vector policy operand may be present for masked intrinsics
   bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TrueTSFlags);
-  unsigned TrueVLIndex =
-      True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
+  bool HasSEWOp = RISCVII::hasSEWOp(TrueTSFlags);
+  unsigned TrueVLIndex = True.getNumOperands() - HasVecPolicyOp - HasChainOp -
+                         HasGlueOp - 1 - HasSEWOp;
   SDValue TrueVL = True.getOperand(TrueVLIndex);
-  SDValue SEW = True.getOperand(TrueVLIndex + 1);
+  SDValue SEW =
+      HasSEWOp ? True.getOperand(TrueVLIndex + 1)
+               : CurDAG->getTargetConstant(RISCVII::getLog2SEW(TrueTSFlags), DL,
+                                           Subtarget->getXLenVT());
 
   auto GetMinVL = [](SDValue LHS, SDValue RHS) {
     if (LHS == RHS)
@@ -3732,8 +3742,6 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
         !True->getFlags().hasNoFPExcept())
       return false;
 
-  SDLoc DL(N);
-
   // From the preconditions we checked above, we know the mask and thus glue
   // for the result node will be taken from True.
   if (IsMasked) {
@@ -3799,7 +3807,10 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
   if (HasRoundingMode)
     Ops.push_back(True->getOperand(TrueVLIndex - 1));
 
-  Ops.append({VL, SEW, PolicyOp});
+  Ops.push_back(VL);
+  if (RISCVII::hasSEWOp(TrueTSFlags))
+    Ops.push_back(SEW);
+  Ops.push_back(PolicyOp);
 
   // Result node should have chain operand of True.
   if (HasChainOp)
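
A hypothetical standalone mirror of the TrueVLIndex computation above, for
readers following the operand arithmetic: trailing SDNode operands are
[..., VL, SEW?, Policy?, Chain?, Glue?], and the SEW slot now only exists
when the pseudo keeps an explicit SEW operand (otherwise SEW is
re-materialized from TSFlags):

#include <cassert>

unsigned trueVLIndex(unsigned NumOps, bool HasPolicy, bool HasChain,
                     bool HasGlue, bool HasSEWOp) {
  return NumOps - HasPolicy - HasChain - HasGlue - 1 - HasSEWOp;
}

int main() {
  // Pseudo with policy and chain whose SEW operand was removed:
  assert(trueVLIndex(8, true, true, false, false) == 5);
  // Same shape when the SEW operand is still present:
  assert(trueVLIndex(9, true, true, false, true) == 5);
  return 0;
}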
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3ab9e7d69105ca..c317f63aadd621 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17857,7 +17857,6 @@ static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI,
       .add(MI.getOperand(3))
       .add(MachineOperand::CreateImm(7)) // frm = DYN
       .add(MI.getOperand(4))
-      .add(MI.getOperand(5))
       .add(MI.getOperand(6))
       .add(MachineOperand::CreateReg(RISCV::FRM,
                                      /*IsDef*/ false,
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index b5fd508fa77de2..ffb4bdd1cd392b 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -55,10 +55,6 @@ static unsigned getVLOpNum(const MachineInstr &MI) {
   return RISCVII::getVLOpNum(MI.getDesc());
 }
 
-static unsigned getSEWOpNum(const MachineInstr &MI) {
-  return RISCVII::getSEWOpNum(MI.getDesc());
-}
-
 static bool isVectorConfigInstr(const MachineInstr &MI) {
   return MI.getOpcode() == RISCV::PseudoVSETVLI ||
          MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
@@ -166,9 +162,9 @@ static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
 /// Return true if this is an operation on mask registers.  Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
-  if (!RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+  if (!RISCVII::hasSEW(MI.getDesc().TSFlags))
     return false;
-  const unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
+  const unsigned Log2SEW = RISCVII::getLog2SEW(MI);
   // A Log2SEW of 0 is an operation on mask registers only.
   return Log2SEW == 0;
 }
@@ -383,7 +379,7 @@ DemandedFields getDemanded(const MachineInstr &MI,
     Res.demandVTYPE();
   // Start conservative on the unlowered form too
   uint64_t TSFlags = MI.getDesc().TSFlags;
-  if (RISCVII::hasSEWOp(TSFlags)) {
+  if (RISCVII::hasSEW(TSFlags)) {
     Res.demandVTYPE();
     if (RISCVII::hasVLOp(TSFlags))
       Res.demandVL();
@@ -405,7 +401,7 @@ DemandedFields getDemanded(const MachineInstr &MI,
   }
 
   // Store instructions don't use the policy fields.
-  if (RISCVII::hasSEWOp(TSFlags) && MI.getNumExplicitDefs() == 0) {
+  if (RISCVII::hasSEW(TSFlags) && MI.getNumExplicitDefs() == 0) {
     Res.TailPolicy = false;
     Res.MaskPolicy = false;
   }
@@ -940,7 +936,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
 
   RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
 
-  unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
+  unsigned Log2SEW = RISCVII::getLog2SEW(MI);
   // A Log2SEW of 0 is an operation on mask registers only.
   unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
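
The Log2SEW == 0 convention above recurs throughout the pass; a tiny runnable
example of the reconstruction (mask-register ops carry Log2SEW == 0 and run
as SEW = 8):

#include <cassert>

unsigned sewFromLog2(unsigned Log2SEW) {
  return Log2SEW ? 1u << Log2SEW : 8u; // Log2SEW == 0: mask op, use SEW=8
}

int main() {
  assert(sewFromLog2(0) == 8);  // vlm/vsm and mask logic ops
  assert(sewFromLog2(5) == 32); // e.g. an SEW=32 arithmetic op
  return 0;
}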
@@ -1176,7 +1172,7 @@ static VSETVLIInfo adjustIncoming(VSETVLIInfo PrevInfo, VSETVLIInfo NewInfo,
 void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
                                         const MachineInstr &MI) const {
   uint64_t TSFlags = MI.getDesc().TSFlags;
-  if (!RISCVII::hasSEWOp(TSFlags))
+  if (!RISCVII::hasSEW(TSFlags))
     return;
 
   const VSETVLIInfo NewInfo = computeInfoForInstr(MI, TSFlags, *ST, MRI);
@@ -1256,7 +1252,7 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+    if (isVectorConfigInstr(MI) || RISCVII::hasSEW(MI.getDesc().TSFlags))
       HadVectorOp = true;
 
     transferAfter(Info, MI);
@@ -1385,7 +1381,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     }
 
     uint64_t TSFlags = MI.getDesc().TSFlags;
-    if (RISCVII::hasSEWOp(TSFlags)) {
+    if (RISCVII::hasSEW(TSFlags)) {
       if (PrevInfo != CurInfo) {
         // If this is the first implicit state change, and the state change
         // requested can be proven to produce the same register contents, we
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index a5c8524d05cbc5..01e514609eaf31 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -223,6 +223,13 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
   // 3 -> widening case
   bits<2> TargetOverlapConstraintType = 0;
   let TSFlags{22-21} = TargetOverlapConstraintType;
+
+  bit HasImplictSEW = 0;
+  let TSFlags{23} = HasImplictSEW;
+
+  // The actual SEW value is 8 * (2 ^ VSEW).
+  bits<2> VSEW = 0;
+  let TSFlags{25-24} = VSEW;
 }
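
A sketch of how the two new TSFlags fields are read back on the C++ side;
the shift values mirror the RISCVBaseInfo.h hunk above (bit 23 and bits
25-24), though the constants here are restated for illustration:

#include <cassert>
#include <cstdint>

constexpr unsigned HasImplictSEWShift = 23;
constexpr uint64_t HasImplictSEWMask = 1ULL << HasImplictSEWShift;
constexpr unsigned VSEWShift = 24;
constexpr uint64_t VSEWMask = 0b11ULL << VSEWShift;

constexpr bool hasImplictSEW(uint64_t TSFlags) {
  return TSFlags & HasImplictSEWMask;
}
constexpr unsigned getLog2SEW(uint64_t TSFlags) {
  return 3 + unsigned((TSFlags & VSEWMask) >> VSEWShift);
}

int main() {
  // HasImplictSEW = 1, VSEW = 2 (SEW = 32) in an otherwise empty word.
  uint64_t TSFlags = HasImplictSEWMask | (2ULL << VSEWShift);
  assert(hasImplictSEW(TSFlags) && getLog2SEW(TSFlags) == 5);
  return 0;
}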
 
 class RVInst<dag outs, dag ins, string opcodestr, string argstr,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 8cb9a40a98bcd8..50c4db3346c0b8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -287,7 +287,7 @@ static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
 
           // If the producing instruction does not depend on vsetvli, do not
           // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
-          if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
+          if (!RISCVII::hasSEW(TSFlags) || !RISCVII::hasVLOp(TSFlags))
             return false;
 
           // Found the definition.
@@ -410,9 +410,9 @@ void RISCVInstrInfo::copyPhysRegVector(
       MIB = MIB.addReg(ActualSrcReg, getKillRegState(KillSrc));
     if (UseVMV) {
       const MCInstrDesc &Desc = DefMBBI->getDesc();
-      MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc)));  // AVL
-      MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
-      MIB.addImm(0);                                            // tu, mu
+      MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
+      MIB.add(RISCVII::getSEWOp(*DefMBBI));                    // SEW
+      MIB.addImm(0);                                           // tu, mu
       MIB.addReg(RISCV::VL, RegState::Implicit);
       MIB.addReg(RISCV::VTYPE, RegState::Implicit);
     }
@@ -1706,8 +1706,7 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
     return false;
 
   // SEW
-  if (RISCVII::hasSEWOp(TSFlags) &&
-      !checkImmOperand(RISCVII::getSEWOpNum(Desc)))
+  if (RISCVII::hasSEW(TSFlags) && !checkImmOperand(RISCVII::getSEWOpNum(Desc)))
     return false;
 
   // Mask
@@ -2463,10 +2462,6 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
         return false;
       }
     }
-    if (!RISCVII::hasSEWOp(TSFlags)) {
-      ErrInfo = "VL operand w/o SEW operand?";
-      return false;
-    }
   }
   if (RISCVII::hasSEWOp(TSFlags)) {
     unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
@@ -3521,8 +3516,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
   case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
   case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV): {
     assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
-           MI.getNumExplicitOperands() == 7 &&
-           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
+           MI.getNumExplicitOperands() == 6 &&
+           "Expect 6 explicit operands rd, rs2, rs1, rm, vl, policy");
     // If the tail policy is undisturbed we can't convert.
     if ((MI.getOperand(RISCVII::getVecPolicyOpNum(MI.getDesc())).getImm() &
          1) == 0)
@@ -3545,8 +3540,7 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
               .add(MI.getOperand(2))
               .add(MI.getOperand(3))
               .add(MI.getOperand(4))
-              .add(MI.getOperand(5))
-              .add(MI.getOperand(6));
+              .add(MI.getOperand(5));
     break;
   }
   case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index fc60a9cc7cd30e..21086688f13c70 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -769,15 +769,20 @@ class GetVTypeScalarPredicates<VTypeInfo vti> {
 class VPseudoUSLoadNoMask<VReg RetClass,
                           int EEW> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew,
-                  ixlenimm:$policy), []>,
+             !if(!eq(EEW, 1),
+                 (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  // For mask loads, EEW = 1 and the explicit SEW operand is kept.
+  defvar hasSEWOp = !eq(EEW, 1);
+  let HasSEWOp = hasSEWOp;
+  let HasImplictSEW = !not(hasSEWOp);
+  let VSEW = !if(hasSEWOp, 0, !logtwo(!div(EEW, 8)));
   let HasVecPolicyOp = 1;
   let Constraints = "$rd = $dest";
 }
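
Worked example of the `!logtwo(!div(EEW, 8))` initializer (assumption: EEW is
one of 8/16/32/64 for data accesses; EEW == 1 is the mask-load case, which
keeps the explicit SEW operand instead of using the VSEW field):

#include <cassert>

unsigned vsewForEEW(unsigned EEW) {
  unsigned V = 0;
  for (unsigned W = 8; W < EEW; W *= 2)
    ++V;
  return V; // log2(EEW / 8)
}

int main() {
  assert(vsewForEEW(8) == 0 && vsewForEEW(16) == 1 &&
         vsewForEEW(32) == 2 && vsewForEEW(64) == 3);
  return 0;
}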
@@ -787,7 +792,7 @@ class VPseudoUSLoadMask<VReg RetClass,
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+                  VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
@@ -795,7 +800,8 @@ class VPseudoUSLoadMask<VReg RetClass,
   let hasSideEffects = 0;
   let Constraints = "$rd = $merge";
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
 }
@@ -803,15 +809,15 @@ class VPseudoUSLoadMask<VReg RetClass,
 class VPseudoUSLoadFFNoMask<VReg RetClass,
                             int EEW> :
       Pseudo<(outs RetClass:$rd, GPR:$vl),
-             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
-                  ixlenimm:$sew, ixlenimm:$policy), []>,
+             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let Constraints = "$rd = $dest";
 }
@@ -821,7 +827,7 @@ class VPseudoUSLoadFFMask<VReg RetClass,
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1,
-                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
+                  VMaskOp:$vm, AVL:$avl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
@@ -829,7 +835,8 @@ class VPseudoUSLoadFFMask<VReg RetClass,
   let hasSideEffects = 0;
   let Constraints = "$rd = $merge";
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
 }
@@ -837,15 +844,15 @@ class VPseudoUSLoadFFMask<VReg RetClass,
 class VPseudoSLoadNoMask<VReg RetClass,
                          int EEW> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl,
-                  ixlenimm:$sew, ixlenimm:$policy), []>,
+             (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let Constraints = "$rd = $dest";
 }
@@ -855,7 +862,7 @@ class VPseudoSLoadMask<VReg RetClass,
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPRMem:$rs1, GPR:$rs2,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+                  VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
@@ -863,7 +870,8 @@ class VPseudoSLoadMask<VReg RetClass,
   let hasSideEffects = 0;
   let Constraints = "$rd = $merge";
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
 }
@@ -917,56 +925,62 @@ class VPseudoILoadMask<VReg RetClass,
 class VPseudoUSStoreNoMask<VReg StClass,
                            int EEW> :
       Pseudo<(outs),
-             (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>,
+             !if(!eq(EEW, 1),
+                 (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew),
+                 (ins StClass:$rd, GPRMem:$rs1, AVL:$vl)), []>,
       RISCVVPseudo,
       RISCVVSE</*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  // For mask stores, EEW = 1 and the explicit SEW operand is kept.
+  defvar hasSEWOp = !eq(EEW, 1);
+  let HasSEWOp = hasSEWOp;
+  let HasImplictSEW = !not(hasSEWOp);
+  let VSEW = !if(hasSEWOp, 0, !logtwo(!div(EEW, 8)));
 }
 
 class VPseudoUSStoreMask<VReg StClass,
                          int EEW> :
       Pseudo<(outs),
-             (ins StClass:$rd, GPRMem:$rs1,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
+             (ins StClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl), []>,
       RISCVVPseudo,
       RISCVVSE</*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
 }
 
 class VPseudoSStoreNoMask<VReg StClass,
                           int EEW> :
       Pseudo<(outs),
-             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
-                  AVL:$vl, ixlenimm:$sew), []>,
+             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, AVL:$vl), []>,
       RISCVVPseudo,
       RISCVVSE</*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
 }
 
 class VPseudoSStoreMask<VReg StClass,
                         int EEW> :
       Pseudo<(outs),
-             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
+             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl), []>,
       RISCVVPseudo,
       RISCVVSE</*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
 }
 
 class VPseudoNullaryNoMask<VReg RegClass> :
@@ -1018,10 +1032,14 @@ class VPseudoNullaryPseudoM<string BaseInst> :
 class VPseudoUnaryNoMask<DAGOperand RetClass,
                          DAGOperand OpClass,
                          string Constraint = "",
-                         int TargetConstraintType = 1> :
+                         int TargetConstraintType = 1,
+                         bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$merge, OpClass:$rs2,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins RetClass:$merge, OpClass:$rs2,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins RetClass:$merge, OpClass:$rs2,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1029,17 +1047,24 @@ class VPseudoUnaryNoMask<DAGOperand RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
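
Sketch of the `sewDividedBy8` guard used just above and in the classes that
follow: SEW == 0 marks a pseudo with no SEW-specific variant, so !logtwo(0)
must be avoided and VSEW falls back to 0 (the field is only meaningful when
HasImplictSEW is set):

#include <cassert>

unsigned vsewField(unsigned SEW) {
  unsigned SewDiv8 = SEW / 8;
  unsigned Log2 = 0;
  for (unsigned V = SewDiv8; V > 1; V /= 2)
    ++Log2;
  return SewDiv8 > 0 ? Log2 : 0;
}

int main() {
  assert(vsewField(0) == 0);  // SEW-agnostic pseudo, SEW operand kept
  assert(vsewField(64) == 3); // SEW = 64 -> VSEW = 3
  return 0;
}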
 
 class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
                                      DAGOperand OpClass,
                                      string Constraint = "",
-                                     int TargetConstraintType = 1> :
+                                     int TargetConstraintType = 1,
+                                     bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1047,19 +1072,26 @@ class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let HasRoundModeOp = 1;
   let UsesVXRM = 0;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoUnaryMask<VReg RetClass,
                        VReg OpClass,
                        string Constraint = "",
-                       int TargetConstraintType = 1> :
+                       int TargetConstraintType = 1,
+                       bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                      VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                      VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1067,19 +1099,27 @@ class VPseudoUnaryMask<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoUnaryMaskRoundingMode<VReg RetClass,
                                    VReg OpClass,
                                    string Constraint = "",
-                                   int TargetConstraintType = 1> :
+                                   int TargetConstraintType = 1,
+                                   bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
-                  VMaskOp:$vm, ixlenimm:$rm,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                      VMaskOp:$vm, ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                      VMaskOp:$vm, ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1087,11 +1127,14 @@ class VPseudoUnaryMaskRoundingMode<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
   let HasRoundModeOp = 1;
   let UsesVXRM = 0;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoUnaryMask_NoExcept<VReg RetClass,
@@ -1114,10 +1157,14 @@ class VPseudoUnaryMask_NoExcept<VReg RetClass,
 class VPseudoUnaryNoMask_FRM<VReg RetClass,
                              VReg OpClass,
                              string Constraint = "",
-                             int TargetConstraintType = 1> :
+                             int TargetConstraintType = 1,
+                             bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1125,19 +1172,27 @@ class VPseudoUnaryNoMask_FRM<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let HasRoundModeOp = 1;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoUnaryMask_FRM<VReg RetClass,
                            VReg OpClass,
                            string Constraint = "",
-                           int TargetConstraintType = 1> :
+                           int TargetConstraintType = 1,
+                           bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
-                  VMaskOp:$vm, ixlenimm:$frm,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+              !if(hasSEWOp,
+                  (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                       VMaskOp:$vm, ixlenimm:$frm,
+                       AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                  (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                       VMaskOp:$vm, ixlenimm:$frm,
+                       AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1145,10 +1200,13 @@ class VPseudoUnaryMask_FRM<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
   let HasRoundModeOp = 1;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoUnaryNoMaskGPROut :
@@ -1178,14 +1236,16 @@ class VPseudoUnaryAnyMask<VReg RetClass,
                           VReg Op1Class> :
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$merge, Op1Class:$rs2,
-                  VR:$vm, AVL:$vl, ixlenimm:$sew), []>,
+                  VR:$vm, AVL:$vl), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = "@earlyclobber $rd, $rd = $merge";
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoBinaryNoMask<VReg RetClass,
@@ -1209,10 +1269,14 @@ class VPseudoBinaryNoMaskTU<VReg RetClass,
                             VReg Op1Class,
                             DAGOperand Op2Class,
                             string Constraint,
-                            int TargetConstraintType = 1> :
+                            int TargetConstraintType = 1,
+                            bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
-                  ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+                     ixlenimm:$sew, ixlenimm:$policy),
+                (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+                     ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1220,8 +1284,11 @@ class VPseudoBinaryNoMaskTU<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
@@ -1229,20 +1296,27 @@ class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
                                       DAGOperand Op2Class,
                                       string Constraint,
                                       int UsesVXRM_ = 1,
-                                      int TargetConstraintType = 1> :
+                                      int TargetConstraintType = 1,
+                                      bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let HasRoundModeOp = 1;
   let UsesVXRM = UsesVXRM_;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
@@ -1250,23 +1324,32 @@ class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
                                           DAGOperand Op2Class,
                                           string Constraint,
                                           int UsesVXRM_,
-                                          int TargetConstraintType = 1> :
+                                          int TargetConstraintType = 1,
+                                          bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge,
-                  Op1Class:$rs2, Op2Class:$rs1,
-                  VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
-                  ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
+                      ixlenimm:$sew, ixlenimm:$policy),
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
+                      ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
   let HasRoundModeOp = 1;
   let UsesVXRM = UsesVXRM_;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 // Special version of VPseudoBinaryNoMask where we pretend the first source is
@@ -1295,12 +1378,17 @@ class VPseudoTiedBinaryNoMask<VReg RetClass,
 class VPseudoTiedBinaryNoMaskRoundingMode<VReg RetClass,
                                           DAGOperand Op2Class,
                                           string Constraint,
-                                          int TargetConstraintType = 1> :
+                                          int TargetConstraintType = 1,
+                                          bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$rs2, Op2Class:$rs1,
-                  ixlenimm:$rm,
-                  AVL:$vl, ixlenimm:$sew,
-                  ixlenimm:$policy), []>,
+              !if(hasSEWOp,
+                  (ins RetClass:$rs2, Op2Class:$rs1,
+                       ixlenimm:$rm,
+                       AVL:$vl, ixlenimm:$sew,
+                       ixlenimm:$policy),
+                  (ins RetClass:$rs2, Op2Class:$rs1,
+                       ixlenimm:$rm,
+                       AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1308,12 +1396,15 @@ class VPseudoTiedBinaryNoMaskRoundingMode<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let isConvertibleToThreeAddress = 1;
   let IsTiedPseudo = 1;
   let HasRoundModeOp = 1;
   let UsesVXRM = 0;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
@@ -1365,11 +1456,16 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
                               RegisterClass Op1Class,
                               DAGOperand Op2Class,
                               string Constraint,
-                              int TargetConstraintType = 1> :
+                              int TargetConstraintType = 1,
+                              bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge,
-                  Op1Class:$rs2, Op2Class:$rs1,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1377,49 +1473,70 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoTernaryMaskPolicy<VReg RetClass,
                                RegisterClass Op1Class,
                                DAGOperand Op2Class,
-                               string Constraint> :
+                               string Constraint,
+                               bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge,
-                  Op1Class:$rs2, Op2Class:$rs1,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
                                            RegisterClass Op1Class,
                                            DAGOperand Op2Class,
-                                           string Constraint> :
+                                           string Constraint,
+                                           bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge,
-                  Op1Class:$rs2, Op2Class:$rs1,
-                  VMaskOp:$vm,
-                  ixlenimm:$rm,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm,
+                      ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op1Class:$rs2, Op2Class:$rs1,
+                      VMaskOp:$vm,
+                      ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let hasSideEffects = 0;
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let HasRoundModeOp = 1;
   let UsesVXRM = 0;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 // Like VPseudoBinaryNoMask, but output can be V0.
@@ -1488,13 +1605,20 @@ class VPseudoTiedBinaryMask<VReg RetClass,
 class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
                                         DAGOperand Op2Class,
                                         string Constraint,
-                                        int TargetConstraintType = 1> :
+                                        int TargetConstraintType = 1,
+                                        bit hasSEWOp = 1> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
-             (ins GetVRegNoV0<RetClass>.R:$merge,
-                  Op2Class:$rs1,
-                  VMaskOp:$vm,
-                  ixlenimm:$rm,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op2Class:$rs1,
+                      VMaskOp:$vm,
+                      ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins GetVRegNoV0<RetClass>.R:$merge,
+                      Op2Class:$rs1,
+                      VMaskOp:$vm,
+                      ixlenimm:$rm,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1502,12 +1626,15 @@ class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
   let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
   let IsTiedPseudo = 1;
   let HasRoundModeOp = 1;
   let UsesVXRM = 0;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoBinaryCarryIn<VReg RetClass,
@@ -1579,10 +1706,14 @@ class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
                                      RegisterClass Op1Class,
                                      DAGOperand Op2Class,
                                      string Constraint,
-                                     int TargetConstraintType = 1> :
+                                     int TargetConstraintType = 1,
+                                     bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
-                  AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                      AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                      AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1591,17 +1722,24 @@ class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVecPolicyOp = 1;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
                                                  RegisterClass Op1Class,
                                                  DAGOperand Op2Class,
                                                  string Constraint,
-                                                 int TargetConstraintType = 1> :
+                                                 int TargetConstraintType = 1,
+                                                 bit hasSEWOp = 1> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
-                  ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+             !if(hasSEWOp,
+                 (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                      ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
+                 (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                      ixlenimm:$rm, AVL:$vl, ixlenimm:$policy)), []>,
       RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
@@ -1610,24 +1748,27 @@ class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
   let TargetOverlapConstraintType = TargetConstraintType;
   let HasVecPolicyOp = 1;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasSEWOp = hasSEWOp;
   let HasRoundModeOp = 1;
   let UsesVXRM = 0;
+  let HasImplictSEW = !not(hasSEWOp);
+  defvar sewDividedBy8 = !div(SEW, 8);
+  let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0);
 }
 
 class VPseudoUSSegLoadNoMask<VReg RetClass,
                              int EEW,
                              bits<4> NF> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl,
-                  ixlenimm:$sew, ixlenimm:$policy), []>,
+             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let Constraints = "$rd = $dest";
 }
@@ -1637,7 +1778,7 @@ class VPseudoUSSegLoadMask<VReg RetClass,
                            bits<4> NF> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+                  VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
@@ -1645,7 +1786,8 @@ class VPseudoUSSegLoadMask<VReg RetClass,
   let hasSideEffects = 0;
   let Constraints = "$rd = $merge";
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
 }
@@ -1654,15 +1796,15 @@ class VPseudoUSSegLoadFFNoMask<VReg RetClass,
                                int EEW,
                                bits<4> NF> :
       Pseudo<(outs RetClass:$rd, GPR:$vl),
-             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
-                  ixlenimm:$sew, ixlenimm:$policy), []>,
+             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let Constraints = "$rd = $dest";
 }
@@ -1672,7 +1814,7 @@ class VPseudoUSSegLoadFFMask<VReg RetClass,
                              bits<4> NF> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
              (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
-                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
+                  VMaskOp:$vm, AVL:$avl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
@@ -1680,7 +1822,8 @@ class VPseudoUSSegLoadFFMask<VReg RetClass,
   let hasSideEffects = 0;
   let Constraints = "$rd = $merge";
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
 }
@@ -1689,15 +1832,15 @@ class VPseudoSSegLoadNoMask<VReg RetClass,
                             int EEW,
                             bits<4> NF> :
       Pseudo<(outs RetClass:$rd),
-             (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl,
-             ixlenimm:$sew, ixlenimm:$policy), []>,
+             (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let Constraints = "$rd = $merge";
 }
@@ -1707,8 +1850,7 @@ class VPseudoSSegLoadMask<VReg RetClass,
                           bits<4> NF> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
-                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
-                  ixlenimm:$policy), []>,
+                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>,
       RISCVVPseudo,
       RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 1;
@@ -1716,7 +1858,8 @@ class VPseudoSSegLoadMask<VReg RetClass,
   let hasSideEffects = 0;
   let Constraints = "$rd = $merge";
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
 }
@@ -1771,59 +1914,60 @@ class VPseudoUSSegStoreNoMask<VReg ValClass,
                               int EEW,
                               bits<4> NF> :
       Pseudo<(outs),
-             (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>,
+             (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl), []>,
       RISCVVPseudo,
       RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
 }
 
 class VPseudoUSSegStoreMask<VReg ValClass,
                             int EEW,
                             bits<4> NF> :
       Pseudo<(outs),
-             (ins ValClass:$rd, GPRMem:$rs1,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
+             (ins ValClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl), []>,
       RISCVVPseudo,
       RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
 }
 
 class VPseudoSSegStoreNoMask<VReg ValClass,
                              int EEW,
                              bits<4> NF> :
       Pseudo<(outs),
-             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset,
-                  AVL:$vl, ixlenimm:$sew), []>,
+             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, AVL:$vl), []>,
       RISCVVPseudo,
       RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
 }
 
 class VPseudoSSegStoreMask<VReg ValClass,
                            int EEW,
                            bits<4> NF> :
       Pseudo<(outs),
-             (ins ValClass:$rd, GPRMem:$rs1, GPR: $offset,
-                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
+             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, VMaskOp:$vm, AVL:$vl), []>,
       RISCVVPseudo,
       RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
   let HasVLOp = 1;
-  let HasSEWOp = 1;
+  let HasImplictSEW = 1;
+  let VSEW = !logtwo(!div(EEW, 8));
 }
 
 class VPseudoISegStoreNoMask<VReg ValClass,
@@ -2131,10 +2275,13 @@ multiclass VPseudoBinary<VReg RetClass,
                          bit Commutable = 0> {
   let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
     defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+    defvar hasSEWOp = !eq(sew, 0);
     def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
-                                       Constraint, TargetConstraintType>;
+                                       Constraint, TargetConstraintType,
+                                       hasSEWOp=hasSEWOp>;
     def suffix # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
-                                                   Constraint, TargetConstraintType>,
+                                                   Constraint, TargetConstraintType,
+                                                   hasSEWOp=hasSEWOp>,
                            RISCVMaskedPseudo<MaskIdx=3>;
   }
 }
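
The convention threaded through these binary multiclasses is that sew == 0
means a generic pseudo (one per LMUL, $sew operand kept), while a nonzero
sew instantiates a SEW-aware pseudo whose name gains an _E<sew> suffix and
whose SEW is encoded in the instruction itself, hence
defvar hasSEWOp = !eq(sew, 0). A sketch of the naming rule, mirroring
!if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX):

  #include <string>

  // sew == 0: generic "_M1"-style pseudo, keeps the $sew operand.
  // sew != 0: SEW-aware "_M1_E32"-style pseudo, no $sew operand.
  std::string pseudoSuffix(const std::string &MX, int SEW) {
    return SEW ? "_" + MX + "_E" + std::to_string(SEW) : "_" + MX;
  }
  // pseudoSuffix("M1", 32) == "_M1_E32"
  // pseudoSuffix("M1", 0)  == "_M1"
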
@@ -2150,15 +2297,18 @@ multiclass VPseudoBinaryRoundingMode<VReg RetClass,
                                      bit Commutable = 0> {
   let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
     defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+    defvar hasSEWOp = !eq(sew, 0);
     def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
                                                  Constraint, UsesVXRM,
-                                                 TargetConstraintType>;
+                                                 TargetConstraintType,
+                                                 hasSEWOp>;
     def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode<RetClass,
                                                                Op1Class,
                                                                Op2Class,
                                                                Constraint,
                                                                UsesVXRM,
-                                                               TargetConstraintType>,
+                                                               TargetConstraintType,
+                                                               hasSEWOp>,
                            RISCVMaskedPseudo<MaskIdx=3>;
   }
 }
@@ -2190,10 +2340,11 @@ multiclass VPseudoBinaryEmul<VReg RetClass,
                              int sew = 0> {
   let VLMul = lmul.value, SEW=sew in {
     defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
+    defvar hasSEWOp = !eq(sew, 0);
     def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
-                                                       Constraint>;
+                                                       Constraint, hasSEWOp=hasSEWOp>;
     def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
-                                                                          Constraint>,
+                                                                   Constraint, hasSEWOp=hasSEWOp>,
                                                   RISCVMaskedPseudo<MaskIdx=3>;
   }
 }
@@ -2218,17 +2369,21 @@ multiclass VPseudoTiedBinaryRoundingMode<VReg RetClass,
                                          string Constraint = "",
                                          int sew = 0,
                                          int TargetConstraintType = 1> {
-    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
-    let VLMul = MInfo.value in {
-    def suffix # "_TIED":
-      VPseudoTiedBinaryNoMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>;
-    def suffix # "_MASK_TIED" :
-      VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>,
-      RISCVMaskedPseudo<MaskIdx=2>;
+  defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+  defvar hasSEWOp = !eq(sew, 0);
+  let VLMul = MInfo.value, SEW = sew in {
+    def suffix # "_TIED" : VPseudoTiedBinaryNoMaskRoundingMode<RetClass, Op2Class,
+                                                               Constraint,
+                                                               TargetConstraintType,
+                                                               hasSEWOp=hasSEWOp>;
+    def suffix # "_MASK_TIED" : VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class,
+                                                                  Constraint,
+                                                                  TargetConstraintType,
+                                                                  hasSEWOp=hasSEWOp>,
+                                RISCVMaskedPseudo<MaskIdx=2>;
   }
 }
 
-
 multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0, bit Commutable = 0> {
   defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew, Commutable=Commutable>;
 }
@@ -2568,11 +2723,11 @@ multiclass VPseudoVSQR_V_RM {
       foreach e = sews in {
         defvar suffix = "_" # mx # "_E" # e;
         let SEW = e in {
-          def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
+          def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass, hasSEWOp=0>,
                               SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
                                          forceMergeOpRead=true>;
           def "_V" #suffix # "_MASK"
-              : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
+              : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass, hasSEWOp=0>,
                 RISCVMaskedPseudo<MaskIdx = 2>,
                 SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
                            forceMergeOpRead=true>;
@@ -2585,12 +2740,12 @@ multiclass VPseudoVRCP_V {
   foreach m = MxListF in {
     defvar mx = m.MX;
     foreach e = SchedSEWSet<mx, isF=1>.val in {
-      let VLMul = m.value in {
+      let VLMul = m.value, SEW = e in {
         def "_V_" # mx # "_E" # e
-            : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+            : VPseudoUnaryNoMask<m.vrclass, m.vrclass, hasSEWOp=0>,
               SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
         def "_V_" # mx # "_E" # e # "_MASK"
-            : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+            : VPseudoUnaryMask<m.vrclass, m.vrclass, hasSEWOp=0>,
               RISCVMaskedPseudo<MaskIdx = 2>,
               SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
       }
@@ -2602,12 +2757,12 @@ multiclass VPseudoVRCP_V_RM {
   foreach m = MxListF in {
     defvar mx = m.MX;
     foreach e = SchedSEWSet<mx, isF=1>.val in {
-      let VLMul = m.value in {
+      let VLMul = m.value, SEW = e in {
         def "_V_" # mx # "_E" # e
-            : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
+            : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass, hasSEWOp=0>,
               SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
         def "_V_" # mx # "_E" # e # "_MASK"
-            : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
+            : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass, hasSEWOp=0>,
               RISCVMaskedPseudo<MaskIdx = 2>,
               SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
       }
@@ -3205,8 +3360,12 @@ multiclass VPseudoTernaryWithTailPolicy<VReg RetClass,
   let VLMul = MInfo.value, SEW=sew in {
     defvar mx = MInfo.MX;
     let isCommutable = Commutable in
-    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
-    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
+    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class,
+                                                               Op2Class, Constraint,
+                                                               hasSEWOp=0>;
+    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class,
+                                                                   Op2Class, Constraint,
+                                                                   hasSEWOp=0>,
                                           RISCVMaskedPseudo<MaskIdx=3, MaskAffectsRes=true>;
   }
 }
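
Note that VPseudoTernaryWithTailPolicy (and its rounding-mode variant just
below), like VPseudoVSQR_V_RM and VPseudoVRCP_V above, passes hasSEWOp=0
unconditionally rather than !eq(sew, 0): these multiclasses only ever
define _E<sew>-suffixed, SEW-aware pseudos, so the operand is always
redundant for them.
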
@@ -3223,10 +3382,12 @@ multiclass VPseudoTernaryWithTailPolicyRoundingMode<VReg RetClass,
     let isCommutable = Commutable in
     def "_" # mx # "_E" # sew
         : VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
-                                                     Op2Class, Constraint>;
+                                                     Op2Class, Constraint,
+                                                     hasSEWOp=0>;
     def "_" # mx # "_E" # sew # "_MASK"
         : VPseudoTernaryMaskPolicyRoundingMode<RetClass, Op1Class,
-                                               Op2Class, Constraint>,
+                                               Op2Class, Constraint,
+                                               hasSEWOp=0>,
           RISCVMaskedPseudo<MaskIdx=3, MaskAffectsRes=true>;
   }
 }
@@ -3254,18 +3415,21 @@ multiclass VPseudoTernaryWithPolicyRoundingMode<VReg RetClass,
                                                 int sew = 0,
                                                 bit Commutable = 0,
                                                 int TargetConstraintType = 1> {
-  let VLMul = MInfo.value in {
+  let VLMul = MInfo.value, SEW = sew in {
     defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+    defvar hasSEWOp = !eq(sew, 0);
     let isCommutable = Commutable in
     def suffix :
         VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
                                                    Op2Class, Constraint,
-                                                   TargetConstraintType>;
+                                                   TargetConstraintType,
+                                                   hasSEWOp=hasSEWOp>;
     def suffix # "_MASK" :
         VPseudoBinaryMaskPolicyRoundingMode<RetClass, Op1Class,
                                             Op2Class, Constraint,
                                             UsesVXRM_=0,
-                                            TargetConstraintType=TargetConstraintType>,
+                                            TargetConstraintType=TargetConstraintType,
+                                            hasSEWOp=hasSEWOp>,
                                    RISCVMaskedPseudo<MaskIdx=3>;
   }
 }
@@ -3596,10 +3760,14 @@ multiclass VPseudoConversion<VReg RetClass,
                              int sew = 0,
                              int TargetConstraintType = 1> {
   defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+  defvar hasSEWOp = !eq(sew, 0);
   let VLMul = MInfo.value, SEW=sew in {
-    def suffix : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint, TargetConstraintType>;
-    def suffix # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
-                                            Constraint, TargetConstraintType>,
+    def suffix : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint,
+                                    TargetConstraintType,
+                                    hasSEWOp=hasSEWOp>;
+    def suffix # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class, Constraint,
+                                            TargetConstraintType,
+                                            hasSEWOp=hasSEWOp>,
                            RISCVMaskedPseudo<MaskIdx=2>;
   }
 }
@@ -3612,10 +3780,14 @@ multiclass VPseudoConversionRoundingMode<VReg RetClass,
                              int TargetConstraintType = 1> {
   let VLMul = MInfo.value, SEW=sew in {
     defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
-    def suffix : VPseudoUnaryNoMaskRoundingMode<RetClass, Op1Class, Constraint, TargetConstraintType>;
+    defvar hasSEWOp = !eq(sew, 0);
+    def suffix : VPseudoUnaryNoMaskRoundingMode<RetClass, Op1Class, Constraint,
+                                                TargetConstraintType,
+                                                hasSEWOp=hasSEWOp>;
     def suffix # "_MASK" : VPseudoUnaryMaskRoundingMode<RetClass, Op1Class,
                                                         Constraint,
-                                                        TargetConstraintType>,
+                                                        TargetConstraintType,
+                                                        hasSEWOp=hasSEWOp>,
                            RISCVMaskedPseudo<MaskIdx=2>;
   }
 }
@@ -3629,10 +3801,13 @@ multiclass VPseudoConversionRM<VReg RetClass,
                                int TargetConstraintType = 1> {
   let VLMul = MInfo.value, SEW=sew in {
     defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
-    def suffix : VPseudoUnaryNoMask_FRM<RetClass, Op1Class,
-                                        Constraint, TargetConstraintType>;
-    def suffix # "_MASK" : VPseudoUnaryMask_FRM<RetClass, Op1Class,
-                                                Constraint, TargetConstraintType>,
+    defvar hasSEWOp = !eq(sew, 0);
+    def suffix : VPseudoUnaryNoMask_FRM<RetClass, Op1Class, Constraint,
+                                        TargetConstraintType,
+                                        hasSEWOp=hasSEWOp>;
+    def suffix # "_MASK" : VPseudoUnaryMask_FRM<RetClass, Op1Class, Constraint,
+                                                TargetConstraintType,
+                                                hasSEWOp=hasSEWOp>,
                            RISCVMaskedPseudo<MaskIdx=2>;
   }
 }
@@ -3984,13 +4159,15 @@ class VPatUnaryNoMask<string intrinsic_name,
                    (result_type result_reg_class:$merge),
                    (op2_type op2_reg_class:$rs2),
                    VLOpFrag)),
-                   (!cast<Instruction>(
-                     !if(isSEWAware,
-                         inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-                         inst#"_"#kind#"_"#vlmul.MX))
-                   (result_type result_reg_class:$merge),
-                   (op2_type op2_reg_class:$rs2),
-                   GPR:$vl, log2sew, TU_MU)>;
+                  !if(isSEWAware,
+                      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
+                       (result_type result_reg_class:$merge),
+                       (op2_type op2_reg_class:$rs2),
+                       GPR:$vl, TU_MU),
+                      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                       (result_type result_reg_class:$merge),
+                       (op2_type op2_reg_class:$rs2),
+                       GPR:$vl, log2sew, TU_MU))>;
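
On the pattern side the same split becomes an !if(isSEWAware, ...) over the
whole result dag: the SEW-aware arm casts to the _E<2^log2sew>-suffixed
pseudo and omits the log2sew operand, while the generic arm keeps both. A
hedged sketch of the name selection (function and example names are
illustrative, not the patch's API):

  #include <string>

  // SEW-aware patterns append _E<2^log2sew> to the pseudo name and
  // drop the trailing log2sew immediate from the operand list.
  std::string selectInst(const std::string &Inst, const std::string &Kind,
                         const std::string &MX, unsigned Log2SEW,
                         bool IsSEWAware) {
    std::string Name = Inst + "_" + Kind + "_" + MX;
    if (IsSEWAware)
      Name += "_E" + std::to_string(1u << Log2SEW);
    return Name;
  }
  // selectInst("PseudoVFSQRT", "V", "M1", 5, true) == "PseudoVFSQRT_V_M1_E32"
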
 
 class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
                                   string inst,
@@ -4007,14 +4184,17 @@ class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
                    (op2_type op2_reg_class:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag)),
-                   (!cast<Instruction>(
-                      !if(isSEWAware,
-                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-                          inst#"_"#kind#"_"#vlmul.MX))
-                   (result_type result_reg_class:$merge),
-                   (op2_type op2_reg_class:$rs2),
-                   (XLenVT timm:$round),
-                   GPR:$vl, log2sew, TU_MU)>;
+                   !if(isSEWAware,
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_reg_class:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, TU_MU),
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_reg_class:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, log2sew, TU_MU))>;
 
 
 class VPatUnaryMask<string intrinsic_name,
@@ -4033,13 +4213,15 @@ class VPatUnaryMask<string intrinsic_name,
                    (op2_type op2_reg_class:$rs2),
                    (mask_type V0),
                    VLOpFrag, (XLenVT timm:$policy))),
-                   (!cast<Instruction>(
-                      !if(isSEWAware,
-                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
-                   (result_type result_reg_class:$merge),
-                   (op2_type op2_reg_class:$rs2),
-                   (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+                   !if(isSEWAware,
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_reg_class:$rs2),
+                        (mask_type V0), GPR:$vl, (XLenVT timm:$policy)),
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_reg_class:$rs2),
+                        (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy)))>;
 
 class VPatUnaryMaskRoundingMode<string intrinsic_name,
                                 string inst,
@@ -4058,15 +4240,19 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
                    (mask_type V0),
                    (XLenVT timm:$round),
                    VLOpFrag, (XLenVT timm:$policy))),
-                   (!cast<Instruction>(
-                      !if(isSEWAware,
-                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
-                   (result_type result_reg_class:$merge),
-                   (op2_type op2_reg_class:$rs2),
-                   (mask_type V0),
-                   (XLenVT timm:$round),
-                   GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+                   !if(isSEWAware,
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_reg_class:$rs2),
+                        (mask_type V0),
+                        (XLenVT timm:$round),
+                        GPR:$vl, (XLenVT timm:$policy)),
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_reg_class:$rs2),
+                        (mask_type V0),
+                        (XLenVT timm:$round),
+                        GPR:$vl, log2sew, (XLenVT timm:$policy)))>;
 
 
 class VPatMaskUnaryNoMask<string intrinsic_name,
@@ -4112,7 +4298,7 @@ class VPatUnaryAnyMask<string intrinsic,
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
                    (mask_type VR:$rs2),
-                   GPR:$vl, log2sew)>;
+                   GPR:$vl)>;
 
 class VPatBinaryM<string intrinsic_name,
                   string inst,
@@ -4139,17 +4325,24 @@ class VPatBinaryNoMaskTU<string intrinsic_name,
                          int sew,
                          VReg result_reg_class,
                          VReg op1_reg_class,
-                         DAGOperand op2_kind> :
+                         DAGOperand op2_kind,
+                         bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    VLOpFrag)),
-                   (!cast<Instruction>(inst)
-                   (result_type result_reg_class:$merge),
-                   (op1_type op1_reg_class:$rs1),
-                   (op2_type op2_kind:$rs2),
-                   GPR:$vl, sew, TU_MU)>;
+                   !if(hasSEWOp,
+                       (!cast<Instruction>(inst)
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        GPR:$vl, sew, TU_MU),
+                       (!cast<Instruction>(inst)
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        GPR:$vl, TU_MU))>;
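
hasSEWOp defaults to 1 in these VPat classes, so untouched instantiations
keep emitting the GPR:$vl, sew, <policy> operand list unchanged; only the
SEW-aware call sites updated further down pass hasSEWOp=0 to select the
shorter form. The same default-plus-override scheme repeats for every
rounding-mode and masked variant below.
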
 
 class VPatBinaryNoMaskRoundingMode<string intrinsic_name,
                                    string inst,
@@ -4158,19 +4351,27 @@ class VPatBinaryNoMaskRoundingMode<string intrinsic_name,
                                    ValueType op2_type,
                                    int sew,
                                    VReg op1_reg_class,
-                                   DAGOperand op2_kind> :
+                                   DAGOperand op2_kind,
+                                   bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    (result_type (undef)),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag)),
-                   (!cast<Instruction>(inst)
-                   (result_type (IMPLICIT_DEF)),
-                   (op1_type op1_reg_class:$rs1),
-                   (op2_type op2_kind:$rs2),
-                   (XLenVT timm:$round),
-                   GPR:$vl, sew, TA_MA)>;
+                   !if(hasSEWOp,
+                       (!cast<Instruction>(inst)
+                        (result_type (IMPLICIT_DEF)),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, sew, TA_MA),
+                       (!cast<Instruction>(inst)
+                        (result_type (IMPLICIT_DEF)),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, TA_MA))>;
 
 class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
                                      string inst,
@@ -4180,19 +4381,27 @@ class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
                                      int sew,
                                      VReg result_reg_class,
                                      VReg op1_reg_class,
-                                     DAGOperand op2_kind> :
+                                     DAGOperand op2_kind,
+                                     bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag)),
-                   (!cast<Instruction>(inst)
-                   (result_type result_reg_class:$merge),
-                   (op1_type op1_reg_class:$rs1),
-                   (op2_type op2_kind:$rs2),
-                   (XLenVT timm:$round),
-                   GPR:$vl, sew, TU_MU)>;
+                   !if(hasSEWOp,
+                       (!cast<Instruction>(inst)
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, sew, TU_MU),
+                       (!cast<Instruction>(inst)
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, TU_MU))>;
 
 
 // Same as above but source operands are swapped.
@@ -4244,18 +4453,25 @@ class VPatBinaryMaskTA<string intrinsic_name,
                        int sew,
                        VReg result_reg_class,
                        VReg op1_reg_class,
-                       DAGOperand op2_kind> :
+                       DAGOperand op2_kind,
+                       bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    VLOpFrag, (XLenVT timm:$policy))),
-                   (!cast<Instruction>(inst#"_MASK")
-                   (result_type result_reg_class:$merge),
-                   (op1_type op1_reg_class:$rs1),
-                   (op2_type op2_kind:$rs2),
-                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
+                   !if(hasSEWOp,
+                       (!cast<Instruction>(inst#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy)),
+                       (!cast<Instruction>(inst#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (mask_type V0), GPR:$vl, (XLenVT timm:$policy)))>;
 
 class VPatBinaryMaskTARoundingMode<string intrinsic_name,
                                    string inst,
@@ -4266,7 +4482,8 @@ class VPatBinaryMaskTARoundingMode<string intrinsic_name,
                                    int sew,
                                    VReg result_reg_class,
                                    VReg op1_reg_class,
-                                   DAGOperand op2_kind> :
+                                   DAGOperand op2_kind,
+                                   bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
@@ -4274,13 +4491,21 @@ class VPatBinaryMaskTARoundingMode<string intrinsic_name,
                    (mask_type V0),
                    (XLenVT timm:$round),
                    VLOpFrag, (XLenVT timm:$policy))),
-                   (!cast<Instruction>(inst#"_MASK")
-                   (result_type result_reg_class:$merge),
-                   (op1_type op1_reg_class:$rs1),
-                   (op2_type op2_kind:$rs2),
-                   (mask_type V0),
-                   (XLenVT timm:$round),
-                   GPR:$vl, sew, (XLenVT timm:$policy))>;
+                   !if(hasSEWOp,
+                       (!cast<Instruction>(inst#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (mask_type V0),
+                        (XLenVT timm:$round),
+                        GPR:$vl, sew, (XLenVT timm:$policy)),
+                       (!cast<Instruction>(inst#"_MASK")
+                        (result_type result_reg_class:$merge),
+                        (op1_type op1_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (mask_type V0),
+                        (XLenVT timm:$round),
+                        GPR:$vl, (XLenVT timm:$policy)))>;
 
 // Same as above but source operands are swapped.
 class VPatBinaryMaskSwapped<string intrinsic_name,
@@ -4328,18 +4553,25 @@ class VPatTiedBinaryNoMaskRoundingMode<string intrinsic_name,
                                        ValueType op2_type,
                                        int sew,
                                        VReg result_reg_class,
-                                       DAGOperand op2_kind> :
+                                       DAGOperand op2_kind,
+                                       bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    (result_type (undef)),
                    (result_type result_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag)),
-                   (!cast<Instruction>(inst#"_TIED")
-                   (result_type result_reg_class:$rs1),
-                   (op2_type op2_kind:$rs2),
-                   (XLenVT timm:$round),
-                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
+                   !if(hasSEWOp,
+                       (!cast<Instruction>(inst#"_TIED")
+                        (result_type result_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, sew, TAIL_AGNOSTIC),
+                       (!cast<Instruction>(inst#"_TIED")
+                        (result_type result_reg_class:$rs1),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, TAIL_AGNOSTIC))>;
 
 class VPatTiedBinaryNoMaskTU<string intrinsic_name,
                              string inst,
@@ -4364,18 +4596,25 @@ class VPatTiedBinaryNoMaskTURoundingMode<string intrinsic_name,
                                          ValueType op2_type,
                                          int sew,
                                          VReg result_reg_class,
-                                         DAGOperand op2_kind> :
+                                         DAGOperand op2_kind,
+                                         bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    (result_type result_reg_class:$merge),
                    (result_type result_reg_class:$merge),
                    (op2_type op2_kind:$rs2),
                    (XLenVT timm:$round),
                    VLOpFrag)),
-                   (!cast<Instruction>(inst#"_TIED")
-                   (result_type result_reg_class:$merge),
-                   (op2_type op2_kind:$rs2),
-                   (XLenVT timm:$round),
-                   GPR:$vl, sew, TU_MU)>;
+                   !if(hasSEWOp,
+                       (!cast<Instruction>(inst#"_TIED")
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, sew, TU_MU),
+                       (!cast<Instruction>(inst#"_TIED")
+                        (result_type result_reg_class:$merge),
+                        (op2_type op2_kind:$rs2),
+                        (XLenVT timm:$round),
+                        GPR:$vl, TU_MU))>;
 
 class VPatTiedBinaryMask<string intrinsic_name,
                          string inst,
@@ -4403,7 +4642,8 @@ class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
                                      ValueType mask_type,
                                      int sew,
                                      VReg result_reg_class,
-                                     DAGOperand op2_kind> :
+                                     DAGOperand op2_kind,
+                                     bit hasSEWOp = 1> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                    (result_type result_reg_class:$merge),
                    (result_type result_reg_class:$merge),
@@ -4411,12 +4651,19 @@ class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
                    (mask_type V0),
                    (XLenVT timm:$round),
                    VLOpFrag, (XLenVT timm:$policy))),
-                   (!cast<Instruction>(inst#"_MASK_TIED")
-                   (result_type result_reg_class:$merge),
-                   (op2_type op2_kind:$rs2),
-                   (mask_type V0),
-                   (XLenVT timm:$round),
-                   GPR:$vl, sew, (XLenVT timm:$policy))>;
+                   !if(hasSEWOp,
+                      (!cast<Instruction>(inst#"_MASK_TIED")
+                       (result_type result_reg_class:$merge),
+                       (op2_type op2_kind:$rs2),
+                       (mask_type V0),
+                       (XLenVT timm:$round),
+                       GPR:$vl, sew, (XLenVT timm:$policy)),
+                      (!cast<Instruction>(inst#"_MASK_TIED")
+                       (result_type result_reg_class:$merge),
+                       (op2_type op2_kind:$rs2),
+                       (mask_type V0),
+                       (XLenVT timm:$round),
+                       GPR:$vl, (XLenVT timm:$policy)))>;
 
 class VPatTernaryNoMask<string intrinsic,
                         string inst,
@@ -4460,7 +4707,7 @@ class VPatTernaryNoMaskTA<string intrinsic,
                     result_reg_class:$rs3,
                     (op1_type op1_reg_class:$rs1),
                     op2_kind:$rs2,
-                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+                    GPR:$vl, TAIL_AGNOSTIC)>;
 
 class VPatTernaryNoMaskTARoundingMode<string intrinsic,
                           string inst,
@@ -4484,7 +4731,7 @@ class VPatTernaryNoMaskTARoundingMode<string intrinsic,
                     (op1_type op1_reg_class:$rs1),
                     op2_kind:$rs2,
                     (XLenVT timm:$round),
-                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+                    GPR:$vl, TAIL_AGNOSTIC)>;
 
 class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                   string inst,
@@ -4526,14 +4773,19 @@ class VPatTernaryNoMaskWithPolicyRoundingMode<string intrinsic,
                     (op2_type op2_kind:$rs2),
                     (XLenVT timm:$round),
                     VLOpFrag, (XLenVT timm:$policy))),
-                   (!cast<Instruction>(!if(isSEWAware,
-                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-                          inst#"_"#kind#"_"#vlmul.MX))
-                    result_reg_class:$rs3,
-                    (op1_type op1_reg_class:$rs1),
-                    op2_kind:$rs2,
-                    (XLenVT timm:$round),
-                    GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+                   !if(isSEWAware,
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
+                        result_reg_class:$rs3,
+                        (op1_type op1_reg_class:$rs1),
+                        op2_kind:$rs2,
+                        (XLenVT timm:$round),
+                        GPR:$vl, (XLenVT timm:$policy)),
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                        result_reg_class:$rs3,
+                        (op1_type op1_reg_class:$rs1),
+                        op2_kind:$rs2,
+                        (XLenVT timm:$round),
+                        GPR:$vl, log2sew, (XLenVT timm:$policy)))>;
 
 class VPatTernaryMask<string intrinsic,
                       string inst,
@@ -4605,15 +4857,21 @@ class VPatTernaryMaskPolicyRoundingMode<string intrinsic,
                     (mask_type V0),
                     (XLenVT timm:$round),
                     VLOpFrag, (XLenVT timm:$policy))),
-                   (!cast<Instruction>(!if(isSEWAware,
-                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew) # "_MASK",
-                          inst#"_"#kind#"_"#vlmul.MX # "_MASK"))
-                    result_reg_class:$rs3,
-                    (op1_type op1_reg_class:$rs1),
-                    op2_kind:$rs2,
-                    (mask_type V0),
-                    (XLenVT timm:$round),
-                    GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+                   !if(isSEWAware,
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew) # "_MASK")
+                        result_reg_class:$rs3,
+                        (op1_type op1_reg_class:$rs1),
+                        op2_kind:$rs2,
+                        (mask_type V0),
+                        (XLenVT timm:$round),
+                        GPR:$vl, (XLenVT timm:$policy)),
+                       (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
+                        result_reg_class:$rs3,
+                        (op1_type op1_reg_class:$rs1),
+                        op2_kind:$rs2,
+                        (mask_type V0),
+                        (XLenVT timm:$round),
+                        GPR:$vl, log2sew, (XLenVT timm:$policy)))>;
 
 class VPatTernaryMaskTA<string intrinsic,
                         string inst,
@@ -4638,7 +4896,7 @@ class VPatTernaryMaskTA<string intrinsic,
                     (op1_type op1_reg_class:$rs1),
                     op2_kind:$rs2,
                     (mask_type V0),
-                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+                    GPR:$vl, TAIL_AGNOSTIC)>;
 
 class VPatTernaryMaskTARoundingMode<string intrinsic,
                                     string inst,
@@ -4665,7 +4923,7 @@ class VPatTernaryMaskTARoundingMode<string intrinsic,
                     op2_kind:$rs2,
                     (mask_type V0),
                     (XLenVT timm:$round),
-                    GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+                    GPR:$vl, TAIL_AGNOSTIC)>;
 
 multiclass VPatUnaryS_M<string intrinsic_name,
                              string inst> {
@@ -4807,12 +5065,13 @@ multiclass VPatBinary<string intrinsic,
                       int sew,
                       VReg result_reg_class,
                       VReg op1_reg_class,
-                      DAGOperand op2_kind> {
+                      DAGOperand op2_kind,
+                      bit hasSEWOp = 1> {
   def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
-                           sew, result_reg_class, op1_reg_class, op2_kind>;
+                           sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>;
   def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                          mask_type, sew, result_reg_class, op1_reg_class,
-                         op2_kind>;
+                         op2_kind, hasSEWOp>;
 }
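
VPatBinary simply forwards the bit to both the unmasked-TU and masked
patterns, so a single hasSEWOp=!not(isSEWAware) at each multiclass call
site below flips an entire intrinsic's pattern set at once.
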
 
 multiclass VPatBinaryRoundingMode<string intrinsic,
@@ -4824,14 +5083,15 @@ multiclass VPatBinaryRoundingMode<string intrinsic,
                                   int sew,
                                   VReg result_reg_class,
                                   VReg op1_reg_class,
-                                  DAGOperand op2_kind> {
+                                  DAGOperand op2_kind,
+                                  bit hasSEWOp = 1> {
   def : VPatBinaryNoMaskRoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
-                                       sew, op1_reg_class, op2_kind>;
+                                       sew, op1_reg_class, op2_kind, hasSEWOp>;
   def : VPatBinaryNoMaskTURoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
-                                       sew, result_reg_class, op1_reg_class, op2_kind>;
+                                       sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>;
   def : VPatBinaryMaskTARoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
                                      mask_type, sew, result_reg_class, op1_reg_class,
-                                     op2_kind>;
+                                     op2_kind, hasSEWOp>;
 }
 
 multiclass VPatBinarySwapped<string intrinsic,
@@ -4966,7 +5226,8 @@ multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                           instruction # "_VV_" # vti.LMul.MX),
                       vti.Vector, vti.Vector, vti.Vector,vti.Mask,
                       vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.RegClass>;
+                      vti.RegClass, vti.RegClass,
+                      hasSEWOp=!not(isSEWAware)>;
 }
 
 multiclass VPatBinaryV_VV_RM<string intrinsic, string instruction,
@@ -4979,7 +5240,8 @@ multiclass VPatBinaryV_VV_RM<string intrinsic, string instruction,
                                       instruction # "_VV_" # vti.LMul.MX),
                                   vti.Vector, vti.Vector, vti.Vector,vti.Mask,
                                   vti.Log2SEW, vti.RegClass,
-                                  vti.RegClass, vti.RegClass>;
+                                  vti.RegClass, vti.RegClass,
+                                  hasSEWOp=!not(isSEWAware)>;
 }
 
 multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
@@ -4991,7 +5253,8 @@ multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                       instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
                       vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                       vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.RegClass>;
+                      vti.RegClass, vti.RegClass,
+                      hasSEWOp=0>;
   }
 }
 
@@ -5011,7 +5274,8 @@ multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
       defm : VPatBinary<intrinsic, inst,
                         vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                         vti.Log2SEW, vti.RegClass,
-                        vti.RegClass, ivti.RegClass>;
+                        vti.RegClass, ivti.RegClass,
+                        hasSEWOp=0>;
     }
   }
 }
@@ -5027,7 +5291,8 @@ multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                           instruction#"_"#kind#"_"#vti.LMul.MX),
                       vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                       vti.Log2SEW, vti.RegClass,
-                      vti.RegClass, vti.ScalarRegClass>;
+                      vti.RegClass, vti.ScalarRegClass,
+                      hasSEWOp=!not(isSEWAware)>;
   }
 }
 
@@ -5042,7 +5307,8 @@ multiclass VPatBinaryV_VX_RM<string intrinsic, string instruction,
                                       instruction#"_"#kind#"_"#vti.LMul.MX),
                                   vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                                   vti.Log2SEW, vti.RegClass,
-                                  vti.RegClass, vti.ScalarRegClass>;
+                                  vti.RegClass, vti.ScalarRegClass,
+                                  hasSEWOp=!not(isSEWAware)>;
   }
 }
 
@@ -5113,7 +5379,8 @@ multiclass VPatBinaryW_VV_RM<string intrinsic, string instruction,
     defm : VPatBinaryRoundingMode<intrinsic, name,
                                   Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                                   Vti.Log2SEW, Wti.RegClass,
-                                  Vti.RegClass, Vti.RegClass>;
+                                  Vti.RegClass, Vti.RegClass,
+                                  hasSEWOp=!not(isSEWAware)>;
   }
 }
 
@@ -5146,7 +5413,8 @@ multiclass VPatBinaryW_VX_RM<string intrinsic, string instruction,
     defm : VPatBinaryRoundingMode<intrinsic, name,
                                   Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                                   Vti.Log2SEW, Wti.RegClass,
-                                  Vti.RegClass, Vti.ScalarRegClass>;
+                                  Vti.RegClass, Vti.ScalarRegClass,
+                                  hasSEWOp=!not(isSEWAware)>;
   }
 }
 
@@ -5187,26 +5455,32 @@ multiclass VPatBinaryW_WV_RM<string intrinsic, string instruction,
     defvar name = !if(isSEWAware,
                       instruction # "_WV_" # Vti.LMul.MX # "_E" # Vti.SEW,
                       instruction # "_WV_" # Vti.LMul.MX);
+    defvar hasSEWOp = !not(isSEWAware);
     let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
                                  GetVTypePredicates<Wti>.Predicates) in {
       def : VPatTiedBinaryNoMaskRoundingMode<intrinsic, name,
                                              Wti.Vector, Vti.Vector,
-                                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
+                                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass,
+                                             hasSEWOp=hasSEWOp>;
       def : VPatBinaryNoMaskTURoundingMode<intrinsic, name,
                                            Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
-                                           Wti.RegClass, Wti.RegClass, Vti.RegClass>;
+                                           Wti.RegClass, Wti.RegClass, Vti.RegClass,
+                                           hasSEWOp=hasSEWOp>;
       let AddedComplexity = 1 in {
       def : VPatTiedBinaryNoMaskTURoundingMode<intrinsic, name,
                                                Wti.Vector, Vti.Vector,
-                                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
+                                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass,
+                                               hasSEWOp=hasSEWOp>;
       def : VPatTiedBinaryMaskRoundingMode<intrinsic, name,
                                            Wti.Vector, Vti.Vector, Vti.Mask,
-                                           Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
+                                           Vti.Log2SEW, Wti.RegClass, Vti.RegClass,
+                                           hasSEWOp=hasSEWOp>;
       }
       def : VPatBinaryMaskTARoundingMode<intrinsic, name,
                                          Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                                          Vti.Log2SEW, Wti.RegClass,
-                                         Wti.RegClass, Vti.RegClass>;
+                                         Wti.RegClass, Vti.RegClass,
+                                         hasSEWOp=hasSEWOp>;
     }
   }
 }
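
Here one defvar hasSEWOp = !not(isSEWAware) feeds all five widening
patterns (tied and untied, masked and unmasked), which keeps the _WV
family from mixing operand layouts between its tied and untied forms.
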
@@ -5240,7 +5514,8 @@ multiclass VPatBinaryW_WX_RM<string intrinsic, string instruction,
     defm : VPatBinaryRoundingMode<intrinsic, name,
                                   Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                                   Vti.Log2SEW, Wti.RegClass,
-                                  Wti.RegClass, Vti.ScalarRegClass>;
+                                  Wti.RegClass, Vti.ScalarRegClass,
+                                  hasSEWOp=!not(isSEWAware)>;
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index b4af83a3cbf671..8a3fe0ff908cec 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -34,11 +34,10 @@ multiclass VPatUSLoadStoreSDNode<ValueType type,
   defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
   // Load
   def : Pat<(type (load GPR:$rs1)),
-            (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
-                        log2sew, TA_MA)>;
+            (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl, TA_MA)>;
   // Store
   def : Pat<(store type:$rs2, GPR:$rs1),
-            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
+            (store_instr reg_class:$rs2, GPR:$rs1, avl)>;
 }
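
With the EEW spelled in the opcode itself (PseudoVLE<sew>/PseudoVSE<sew>),
the trailing log2sew immediate on plain SDNode loads and stores was pure
redundancy. For instance, with sew=32 and LMUL=1 the store pattern now
expands to (PseudoVSE32_V_M1 $rs2, $rs1, avl) instead of
(PseudoVSE32_V_M1 $rs2, $rs1, avl, 5), the 5 being log2(32).
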
 
 multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
@@ -83,14 +82,17 @@ class VPatBinarySDNode_VV<SDPatternOperator vop,
     Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
-        (!cast<Instruction>(
-                     !if(isSEWAware,
-                         instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
-                         instruction_name#"_VV_"# vlmul.MX))
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew))
                      (result_type (IMPLICIT_DEF)),
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
-                     avl, log2sew, TA_MA)>;
+                     avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+                     (result_type (IMPLICIT_DEF)),
+                     op_reg_class:$rs1,
+                     op_reg_class:$rs2,
+                     avl, log2sew, TA_MA))>;
 
 class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
                              string instruction_name,
@@ -104,17 +106,21 @@ class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
     Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
-        (!cast<Instruction>(
-                     !if(isSEWAware,
-                         instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
-                         instruction_name#"_VV_"# vlmul.MX))
-                     (result_type (IMPLICIT_DEF)),
-                     op_reg_class:$rs1,
-                     op_reg_class:$rs2,
-                     // Value to indicate no rounding mode change in
-                     // RISCVInsertReadWriteCSR
-                     FRM_DYN,
-                     avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew))
+                         (result_type (IMPLICIT_DEF)),
+                         op_reg_class:$rs1,
+                         op_reg_class:$rs2,
+                         // Value to indicate no rounding mode change in
+                         // RISCVInsertReadWriteCSR
+                         FRM_DYN,
+                         avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+                         (result_type (IMPLICIT_DEF)),
+                         op_reg_class:$rs1,
+                         op_reg_class:$rs2,
+                         FRM_DYN,
+                         avl, log2sew, TA_MA))>;
 
 class VPatBinarySDNode_XI<SDPatternOperator vop,
                           string instruction_name,
@@ -131,14 +137,17 @@ class VPatBinarySDNode_XI<SDPatternOperator vop,
     Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))),
-        (!cast<Instruction>(
-                     !if(isSEWAware,
-                         instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
-                         instruction_name#_#suffix#_# vlmul.MX))
-                     (result_type (IMPLICIT_DEF)),
-                     vop_reg_class:$rs1,
-                     xop_kind:$rs2,
-                     avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew))
+                         (result_type (IMPLICIT_DEF)),
+                         vop_reg_class:$rs1,
+                         xop_kind:$rs2,
+                         avl, TA_MA),
+            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
+                         (result_type (IMPLICIT_DEF)),
+                         vop_reg_class:$rs1,
+                         xop_kind:$rs2,
+                         avl, log2sew, TA_MA))>;
 
 multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
                                   list<VTypeInfo> vtilist = AllIntegerVectors,
@@ -182,14 +191,17 @@ class VPatBinarySDNode_VF<SDPatternOperator vop,
                           bit isSEWAware = 0> :
     Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                           (vop_type (SplatFPOp xop_kind:$rs2)))),
-        (!cast<Instruction>(
-                     !if(isSEWAware,
-                         instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-                         instruction_name#"_"#vlmul.MX))
-                     (result_type (IMPLICIT_DEF)),
-                     vop_reg_class:$rs1,
-                     (xop_type xop_kind:$rs2),
-                     avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
+                         (result_type (IMPLICIT_DEF)),
+                         vop_reg_class:$rs1,
+                         (xop_type xop_kind:$rs2),
+                         avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
+                         (result_type (IMPLICIT_DEF)),
+                         vop_reg_class:$rs1,
+                         (xop_type xop_kind:$rs2),
+                         avl, log2sew, TA_MA))>;
 
 class VPatBinarySDNode_VF_RM<SDPatternOperator vop,
                              string instruction_name,
@@ -204,17 +216,21 @@ class VPatBinarySDNode_VF_RM<SDPatternOperator vop,
                              bit isSEWAware = 0> :
     Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                           (vop_type (SplatFPOp xop_kind:$rs2)))),
-        (!cast<Instruction>(
-                     !if(isSEWAware,
-                         instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-                         instruction_name#"_"#vlmul.MX))
-                     (result_type (IMPLICIT_DEF)),
-                     vop_reg_class:$rs1,
-                     (xop_type xop_kind:$rs2),
-                     // Value to indicate no rounding mode change in
-                     // RISCVInsertReadWriteCSR
-                     FRM_DYN,
-                     avl, log2sew, TA_MA)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
+                         (result_type (IMPLICIT_DEF)),
+                         vop_reg_class:$rs1,
+                         (xop_type xop_kind:$rs2),
+                         // Value to indicate no rounding mode change in
+                         // RISCVInsertReadWriteCSR
+                         FRM_DYN,
+                         avl, TA_MA),
+            (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
+                         (result_type (IMPLICIT_DEF)),
+                         vop_reg_class:$rs1,
+                         (xop_type xop_kind:$rs2),
+                         FRM_DYN,
+                         avl, log2sew, TA_MA))>;
 
 multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name,
                                     bit isSEWAware = 0> {
@@ -252,14 +268,17 @@ multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_nam
     let Predicates = GetVTypePredicates<fvti>.Predicates in
     def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                 (fvti.Vector fvti.RegClass:$rs1))),
-              (!cast<Instruction>(
-                           !if(isSEWAware,
-                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
-                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
-                           (fvti.Vector (IMPLICIT_DEF)),
-                           fvti.RegClass:$rs1,
-                           (fvti.Scalar fvti.ScalarRegClass:$rs2),
-                           fvti.AVL, fvti.Log2SEW, TA_MA)>;
+              !if(isSEWAware,
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
+                               (fvti.Vector (IMPLICIT_DEF)),
+                               fvti.RegClass:$rs1,
+                               (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                               fvti.AVL, TA_MA),
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
+                               (fvti.Vector (IMPLICIT_DEF)),
+                               fvti.RegClass:$rs1,
+                               (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                               fvti.AVL, fvti.Log2SEW, TA_MA))>;
 }
 
 multiclass VPatBinaryFPSDNode_R_VF_RM<SDPatternOperator vop, string instruction_name,
@@ -268,17 +287,21 @@ multiclass VPatBinaryFPSDNode_R_VF_RM<SDPatternOperator vop, string instruction_
     let Predicates = GetVTypePredicates<fvti>.Predicates in
     def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                 (fvti.Vector fvti.RegClass:$rs1))),
-              (!cast<Instruction>(
-                           !if(isSEWAware,
-                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
-                             instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
-                           (fvti.Vector (IMPLICIT_DEF)),
-                           fvti.RegClass:$rs1,
-                           (fvti.Scalar fvti.ScalarRegClass:$rs2),
-                           // Value to indicate no rounding mode change in
-                           // RISCVInsertReadWriteCSR
-                           FRM_DYN,
-                           fvti.AVL, fvti.Log2SEW, TA_MA)>;
+              !if(isSEWAware,
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW)
+                               (fvti.Vector (IMPLICIT_DEF)),
+                               fvti.RegClass:$rs1,
+                               (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                               // Value to indicate no rounding mode change in
+                               // RISCVInsertReadWriteCSR
+                               FRM_DYN,
+                               fvti.AVL, TA_MA),
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
+                               (fvti.Vector (IMPLICIT_DEF)),
+                               fvti.RegClass:$rs1,
+                               (fvti.Scalar fvti.ScalarRegClass:$rs2),
+                               FRM_DYN,
+                               fvti.AVL, fvti.Log2SEW, TA_MA))>;
 }
 
 multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
@@ -415,8 +438,7 @@ multiclass VPatConvertI2FPSDNode_V_RM<SDPatternOperator vop,
                   ivti.RegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
-                  FRM_DYN,
-                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
+                  FRM_DYN, fvti.AVL, TA_MA)>;
   }
 }
 
@@ -443,8 +465,7 @@ multiclass VPatWConvertI2FPSDNode_V<SDPatternOperator vop,
     def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
               (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW)
                   (fwti.Vector (IMPLICIT_DEF)),
-                  ivti.RegClass:$rs1,
-                  ivti.AVL, ivti.Log2SEW, TA_MA)>;
+                  ivti.RegClass:$rs1, ivti.AVL, TA_MA)>;
   }
 }
 
@@ -475,8 +496,7 @@ multiclass VPatNConvertI2FPSDNode_W_RM<SDPatternOperator vop,
                   iwti.RegClass:$rs1,
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
-                  FRM_DYN,
-                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
+                  FRM_DYN, fvti.AVL, TA_MA)>;
   }
 }
 
@@ -624,10 +644,9 @@ multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name>
                 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_E"#vti.SEW)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   vti.RegClass:$rs1,
-                   // Value to indicate no rounding mode change in
-                   // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                  vti.AVL, vti.Log2SEW, TA_MA)>;
+                  // Value to indicate no rounding mode change in
+                  // RISCVInsertReadWriteCSR
+                  FRM_DYN, vti.AVL, TA_MA)>;
       def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs2),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -639,8 +658,7 @@ multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name>
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, vti.AVL, TA_MA)>;
       def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector vti.RegClass:$rs2),
                                      (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -650,8 +668,7 @@ multiclass VPatWidenBinaryFPSDNode_VV_VF_RM<SDNode op, string instruction_name>
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, vti.AVL, TA_MA)>;
     }
   }
 }
@@ -670,9 +687,7 @@ multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name>
                    wti.RegClass:$rs2, vti.RegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW,
-                   TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
       def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                     (wti.Vector (riscv_fpextend_vl_oneuse
                                      (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
@@ -682,8 +697,7 @@ multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name>
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, vti.AVL, TA_MA)>;
       def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                     (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                 (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
@@ -691,8 +705,7 @@ multiclass VPatWidenBinaryFPSDNode_WV_WF_RM<SDNode op, string instruction_name>
                    vti.ScalarRegClass:$rs1,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, vti.AVL, TA_MA)>;
     }
   }
 }
@@ -720,8 +733,7 @@ multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
       def : Pat<(fma (wti.Vector (SplatFPOp
                                       (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                      (wti.Vector (riscv_fpextend_vl_oneuse
@@ -732,8 +744,7 @@ multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
     }
   }
 }
@@ -755,8 +766,7 @@ multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
       def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))),
                      (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                             (vti.Vector vti.RegClass:$rs2),
@@ -766,8 +776,7 @@ multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
       def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                      (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                                (vti.Mask true_mask), (XLenVT srcvalue)),
@@ -776,8 +785,7 @@ multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
     }
   }
 }
@@ -799,8 +807,7 @@ multiclass VPatWidenFPMulSacSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
       def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                      (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                                (vti.Mask true_mask), (XLenVT srcvalue)),
@@ -809,8 +816,7 @@ multiclass VPatWidenFPMulSacSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
     }
   }
 }
@@ -832,8 +838,7 @@ multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
       def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
                      (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                             (vti.Vector vti.RegClass:$rs2),
@@ -843,8 +848,7 @@ multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
       def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
                      (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                                (vti.Mask true_mask), (XLenVT srcvalue)),
@@ -853,8 +857,7 @@ multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM<string instruction_name> {
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>;
     }
   }
 }
@@ -1099,7 +1102,7 @@ foreach vtiTowti = AllWidenableIntVectors in {
         (vti.Mask true_mask), (XLenVT srcvalue))),
       (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
         (vti.Vector (IMPLICIT_DEF)),
-        vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+        vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>;
   }
 }
 
@@ -1277,32 +1280,28 @@ foreach fvti = AllFloatVectors in {
                    fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
     def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                                     (fneg fvti.RegClass:$rs2))),
               (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                    fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
     def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                     (fneg fvti.RegClass:$rs2))),
               (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                    fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
     def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                                     fvti.RegClass:$rs2)),
               (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                    fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
 
     // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
     // commutable.
@@ -1312,16 +1311,14 @@ foreach fvti = AllFloatVectors in {
                    fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
     def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                     fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
               (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                    fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
 
     def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                     (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
@@ -1329,16 +1326,14 @@ foreach fvti = AllFloatVectors in {
                    fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
     def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                                     (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
               (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                    fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
 
     // The splat might be negated.
     def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
@@ -1347,16 +1342,14 @@ foreach fvti = AllFloatVectors in {
                    fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
     def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                                     fvti.RegClass:$rd, fvti.RegClass:$rs2)),
               (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                    fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -1375,41 +1368,40 @@ foreach vti = AllFloatVectors in {
                    vti.RegClass:$rs2,
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   vti.AVL, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, vti.AVL, TA_MA)>;
 
     // 13.12. Vector Floating-Point Sign-Injection Instructions
     def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
               (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                    (vti.Vector (IMPLICIT_DEF)),
-                   vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
+                   vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, TA_MA)>;
     // Handle fneg with VFSGNJN using the same input for both operands.
     def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
               (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                    (vti.Vector (IMPLICIT_DEF)),
-                   vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
+                   vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, TA_MA)>;
 
     def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                      (vti.Vector vti.RegClass:$rs2))),
               (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                    (vti.Vector (IMPLICIT_DEF)),
-                   vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+                   vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>;
     def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                      (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
               (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (vti.Vector (IMPLICIT_DEF)),
-                   vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+                   vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, TA_MA)>;
 
     def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                      (vti.Vector (fneg vti.RegClass:$rs2)))),
               (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                    (vti.Vector (IMPLICIT_DEF)),
-                   vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+                   vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>;
     def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                      (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
               (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW)
                    (vti.Vector (IMPLICIT_DEF)),
-                   vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+                   vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, TA_MA)>;
   }
 }
 
@@ -1491,8 +1483,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
                 fwti.RegClass:$rs1,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
-                FRM_DYN,
-                fvti.AVL, fvti.Log2SEW, TA_MA)>;
+                FRM_DYN, fvti.AVL, TA_MA)>;
 }
 
 //===----------------------------------------------------------------------===//
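
The TableGen restructuring above follows one rule: SEW-aware pseudos encode SEW in the opcode name (the "_E"#!shl(1, log2sew) suffix) and drop the trailing log2sew immediate from the operand list. Because the two variants now take different operand counts, !if(isSEWAware, ...) can no longer select just the instruction name; it has to select the whole result dag. A minimal C++ sketch of the naming rule; getPseudoName is a hypothetical helper for illustration, not part of this patch:

  #include <string>

  // SEW-aware pseudos append "_E" followed by 2^Log2SEW to the mnemonic;
  // their operand list then omits the SEW immediate.
  static std::string getPseudoName(const std::string &Base,
                                   const std::string &LMulSuffix,
                                   unsigned Log2SEW, bool IsSEWAware) {
    std::string Name = Base + "_" + LMulSuffix;     // e.g. "PseudoVFADD_VV_M1"
    if (IsSEWAware)
      Name += "_E" + std::to_string(1u << Log2SEW); // e.g. "..._E64"
    return Name;
  }

For example, getPseudoName("PseudoVFSGNJ_VV", "M1", 6, true) produces "PseudoVFSGNJ_VV_M1_E64", matching the fcopysign patterns above.
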
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 6c6ecb604fd034..0722d5e4244f3f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -624,14 +624,17 @@ class VPatBinaryVL_V<SDPatternOperator vop,
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
-      (!cast<Instruction>(
-                   !if(isSEWAware,
-                       instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-                       instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
-                   result_reg_class:$merge,
-                   op1_reg_class:$rs1,
-                   op2_reg_class:$rs2,
-                   (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+      !if(isSEWAware,
+          (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+                       result_reg_class:$merge,
+                       op1_reg_class:$rs1,
+                       op2_reg_class:$rs2,
+                       (mask_type V0), GPR:$vl, TAIL_AGNOSTIC),
+          (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")
+                       result_reg_class:$merge,
+                       op1_reg_class:$rs1,
+                       op2_reg_class:$rs2,
+                       (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>;
 
 class VPatBinaryVL_V_RM<SDPatternOperator vop,
                      string instruction_name,
@@ -652,18 +655,25 @@ class VPatBinaryVL_V_RM<SDPatternOperator vop,
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
-      (!cast<Instruction>(
-                   !if(isSEWAware,
-                       instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-                       instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
-                   result_reg_class:$merge,
-                   op1_reg_class:$rs1,
-                   op2_reg_class:$rs2,
-                   (mask_type V0),
-                   // Value to indicate no rounding mode change in
-                   // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+      !if(isSEWAware,
+          (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+                       result_reg_class:$merge,
+                       op1_reg_class:$rs1,
+                       op2_reg_class:$rs2,
+                       (mask_type V0),
+                       // Value to indicate no rounding mode change in
+                       // RISCVInsertReadWriteCSR
+                       FRM_DYN,
+                       GPR:$vl, TAIL_AGNOSTIC),
+          (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")
+                       result_reg_class:$merge,
+                       op1_reg_class:$rs1,
+                       op2_reg_class:$rs2,
+                       (mask_type V0),
+                       // Value to indicate no rounding mode change in
+                       // RISCVInsertReadWriteCSR
+                       FRM_DYN,
+                       GPR:$vl, log2sew, TAIL_AGNOSTIC))>;
 
 multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
                                     string instruction_name,
@@ -739,13 +749,19 @@ multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop,
                          srcvalue,
                          true_mask,
                          VLOpFrag)),
-        (!cast<Instruction>(name)
-                     result_reg_class:$rs1,
-                     op2_reg_class:$rs2,
-                     // Value to indicate no rounding mode change in
-                     // RISCVInsertReadWriteCSR
-                     FRM_DYN,
-                     GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+        !if(isSEWAware,
+            (!cast<Instruction>(name)
+                         result_reg_class:$rs1,
+                         op2_reg_class:$rs2,
+                         // Value to indicate no rounding mode change in
+                         // RISCVInsertReadWriteCSR
+                         FRM_DYN, GPR:$vl, TAIL_AGNOSTIC),
+            (!cast<Instruction>(name)
+                         result_reg_class:$rs1,
+                         op2_reg_class:$rs2,
+                         // Value to indicate no rounding mode change in
+                         // RISCVInsertReadWriteCSR
+                         FRM_DYN, GPR:$vl, log2sew, TAIL_AGNOSTIC))>;
   // Tail undisturbed
   def : Pat<(riscv_vmerge_vl true_mask,
              (result_type (vop
@@ -755,13 +771,19 @@ multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop,
                            true_mask,
                            VLOpFrag)),
              result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
-            (!cast<Instruction>(name)
-                     result_reg_class:$rs1,
-                     op2_reg_class:$rs2,
-                     // Value to indicate no rounding mode change in
-                     // RISCVInsertReadWriteCSR
-                     FRM_DYN,
-                     GPR:$vl, log2sew, TU_MU)>;
+            !if(isSEWAware,
+                (!cast<Instruction>(name)
+                         result_reg_class:$rs1,
+                         op2_reg_class:$rs2,
+                         // Value to indicate no rounding mode change in
+                         // RISCVInsertReadWriteCSR
+                         FRM_DYN, GPR:$vl, TU_MU),
+                (!cast<Instruction>(name)
+                         result_reg_class:$rs1,
+                         op2_reg_class:$rs2,
+                         // Value to indicate no rounding mode change in
+                         // RISCVInsertReadWriteCSR
+                         FRM_DYN, GPR:$vl, log2sew, TU_MU))>;
 }
 
 class VPatBinaryVL_XI<SDPatternOperator vop,
@@ -784,14 +806,17 @@ class VPatBinaryVL_XI<SDPatternOperator vop,
                    (result_type result_reg_class:$merge),
                    (mask_type V0),
                    VLOpFrag)),
-      (!cast<Instruction>(
-                   !if(isSEWAware,
-                       instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-                       instruction_name#_#suffix#_#vlmul.MX#"_MASK"))
-                   result_reg_class:$merge,
-                   vop_reg_class:$rs1,
-                   xop_kind:$rs2,
-                   (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+      !if(isSEWAware,
+          (!cast<Instruction>(instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+                       result_reg_class:$merge,
+                       vop_reg_class:$rs1,
+                       xop_kind:$rs2,
+                       (mask_type V0), GPR:$vl, TAIL_AGNOSTIC),
+          (!cast<Instruction>(instruction_name#_#suffix#_#vlmul.MX#"_MASK")
+                       result_reg_class:$merge,
+                       vop_reg_class:$rs1,
+                       xop_kind:$rs2,
+                       (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>;
 
 multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
                               list<VTypeInfo> vtilist = AllIntegerVectors,
@@ -908,14 +933,17 @@ class VPatBinaryVL_VF<SDPatternOperator vop,
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
-      (!cast<Instruction>(
-                   !if(isSEWAware,
-                       instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-                       instruction_name#"_"#vlmul.MX#"_MASK"))
-                   result_reg_class:$merge,
-                   vop_reg_class:$rs1,
-                   scalar_reg_class:$rs2,
-                   (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+      !if(isSEWAware,
+          (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+                       result_reg_class:$merge,
+                       vop_reg_class:$rs1,
+                       scalar_reg_class:$rs2,
+                       (mask_type V0), GPR:$vl, TAIL_AGNOSTIC),
+          (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK")
+                       result_reg_class:$merge,
+                       vop_reg_class:$rs1,
+                       scalar_reg_class:$rs2,
+                       (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>;
 
 class VPatBinaryVL_VF_RM<SDPatternOperator vop,
                       string instruction_name,
@@ -934,18 +962,25 @@ class VPatBinaryVL_VF_RM<SDPatternOperator vop,
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
-      (!cast<Instruction>(
-                   !if(isSEWAware,
-                       instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
-                       instruction_name#"_"#vlmul.MX#"_MASK"))
-                   result_reg_class:$merge,
-                   vop_reg_class:$rs1,
-                   scalar_reg_class:$rs2,
-                   (mask_type V0),
-                   // Value to indicate no rounding mode change in
-                   // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+      !if(isSEWAware,
+          (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK")
+                       result_reg_class:$merge,
+                       vop_reg_class:$rs1,
+                       scalar_reg_class:$rs2,
+                       (mask_type V0),
+                       // Value to indicate no rounding mode change in
+                       // RISCVInsertReadWriteCSR
+                       FRM_DYN,
+                       GPR:$vl, TAIL_AGNOSTIC),
+          (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK")
+                       result_reg_class:$merge,
+                       vop_reg_class:$rs1,
+                       scalar_reg_class:$rs2,
+                       (mask_type V0),
+                       // Value to indicate no rounding mode change in
+                       // RISCVInsertReadWriteCSR
+                       FRM_DYN,
+                       GPR:$vl, log2sew, TAIL_AGNOSTIC))>;
 
 multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name,
                                 bit isSEWAware = 0> {
@@ -988,13 +1023,15 @@ multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
                                 (fvti.Vector fvti.RegClass:$merge),
                                 (fvti.Mask V0),
                                 VLOpFrag)),
-              (!cast<Instruction>(
-                           !if(isSEWAware,
-                               instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
-                               instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
-                           fvti.RegClass:$merge,
-                           fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
-                           (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+              !if(isSEWAware,
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+                               fvti.RegClass:$merge,
+                               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+                               (fvti.Mask V0), GPR:$vl, TAIL_AGNOSTIC),
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
+                               fvti.RegClass:$merge,
+                               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+                               (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>;
   }
 }
 
@@ -1007,17 +1044,23 @@ multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name
                                 (fvti.Vector fvti.RegClass:$merge),
                                 (fvti.Mask V0),
                                 VLOpFrag)),
-              (!cast<Instruction>(
-                           !if(isSEWAware,
-                               instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
-                               instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
-                           fvti.RegClass:$merge,
-                           fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
-                           (fvti.Mask V0),
-                           // Value to indicate no rounding mode change in
-                           // RISCVInsertReadWriteCSR
-                           FRM_DYN,
-                           GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+              !if(isSEWAware,
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+                               fvti.RegClass:$merge,
+                               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+                               (fvti.Mask V0),
+                               // Value to indicate no rounding mode change in
+                               // RISCVInsertReadWriteCSR
+                               FRM_DYN,
+                               GPR:$vl, TAIL_AGNOSTIC),
+                  (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
+                               fvti.RegClass:$merge,
+                               fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+                               (fvti.Mask V0),
+                               // Value to indicate no rounding mode change in
+                               // RISCVInsertReadWriteCSR
+                               FRM_DYN,
+                               GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>;
   }
 }
 
@@ -1234,8 +1277,7 @@ multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name
                   (ivti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
-                  FRM_DYN,
-                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
+                  FRM_DYN, GPR:$vl, TA_MA)>;
   }
 }
 
@@ -1249,7 +1291,7 @@ multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
                                 VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
-                  (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
+                  (ivti.Mask V0), timm:$frm, GPR:$vl, TA_MA)>;
   }
 }
 
@@ -1318,7 +1360,7 @@ multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop,
               (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW#"_MASK")
                   (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
                   (ivti.Mask V0),
-                  GPR:$vl, ivti.Log2SEW, TA_MA)>;
+                  GPR:$vl, TA_MA)>;
   }
 }
 
@@ -1394,8 +1436,7 @@ multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop,
                   (iwti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
-                  FRM_DYN,
-                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
+                  FRM_DYN, GPR:$vl, TA_MA)>;
   }
 }
 
@@ -1410,7 +1451,7 @@ multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> {
                                 VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
-                  (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
+                  (iwti.Mask V0), timm:$frm, GPR:$vl, TA_MA)>;
   }
 }
 
@@ -1426,7 +1467,7 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
               (vti_m1.Vector VR:$merge),
               (vti.Vector vti.RegClass:$rs1),
               (vti_m1.Vector VR:$rs2),
-              (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+              (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>;
     }
   }
 }
@@ -1446,8 +1487,7 @@ multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float>
               (vti.Mask V0),
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
-              FRM_DYN,
-              GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+              FRM_DYN, GPR:$vl, (XLenVT timm:$policy))>;
     }
   }
 }
@@ -1506,7 +1546,7 @@ multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_n
                                    (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                   (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-                  (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+                  (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl,
                   (XLenVT timm:$policy))>;
     }
   }
@@ -1529,7 +1569,7 @@ multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instructio
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  GPR:$vl, vti.Log2SEW,
+                  GPR:$vl,
                   (XLenVT timm:$policy))>;
     }
   }
@@ -1548,7 +1588,7 @@ multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instru
                                    (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                   (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-                  (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+                  (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl,
                   (XLenVT timm:$policy))>;
     }
   }
@@ -1570,8 +1610,7 @@ multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string ins
                   (wti_m1.Vector VR:$rs2), (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
-                  FRM_DYN,
-                  GPR:$vl, vti.Log2SEW,
+                  FRM_DYN, GPR:$vl,
                   (XLenVT timm:$policy))>;
     }
   }
@@ -1860,8 +1899,7 @@ multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_nam
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, GPR:$vl, TA_MA)>;
 
     def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                                vti.RegClass:$rd, vti.RegClass:$rs2,
@@ -1872,8 +1910,7 @@ multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_nam
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, GPR:$vl, TA_MA)>;
     }
   }
 }
@@ -1927,8 +1964,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TU_MU)>;
+                   FRM_DYN, GPR:$vl, TU_MU)>;
     def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                            (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                             vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
@@ -1938,8 +1974,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TU_MU)>;
+                   FRM_DYN, GPR:$vl, TU_MU)>;
     def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                            (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                             vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
@@ -1949,8 +1984,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, GPR:$vl, TAIL_AGNOSTIC)>;
     def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                            (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                             vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
@@ -1960,8 +1994,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   FRM_DYN, GPR:$vl, TAIL_AGNOSTIC)>;
     }
   }
 }
@@ -2006,8 +2039,7 @@ multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> {
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, GPR:$vl, TA_MA)>;
       def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                      (vti.Vector vti.RegClass:$rs2),
                      (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
@@ -2017,8 +2049,7 @@ multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> {
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, GPR:$vl, TA_MA)>;
     }
   }
 }
@@ -2491,23 +2522,20 @@ foreach vti = AllFloatVectors in {
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
-                   FRM_DYN,
-                   GPR:$vl, vti.Log2SEW, TA_MA)>;
+                   FRM_DYN, GPR:$vl, TA_MA)>;
 
     // 13.12. Vector Floating-Point Sign-Injection Instructions
     def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                              VLOpFrag),
               (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_E"#vti.SEW#"_MASK")
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
-                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
-                   TA_MA)>;
+                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, TA_MA)>;
     // Handle fneg with VFSGNJN using the same input for both operands.
     def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                              VLOpFrag),
               (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW #"_MASK")
                    (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
-                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
-                   TA_MA)>;
+                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, TA_MA)>;
 
     def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector vti.RegClass:$rs2),
@@ -2516,8 +2544,7 @@ foreach vti = AllFloatVectors in {
                                   VLOpFrag),
               (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs1,
-                   vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
-                   TAIL_AGNOSTIC)>;
+                   vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
 
     def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                   (riscv_fneg_vl vti.RegClass:$rs2,
@@ -2528,7 +2555,7 @@ foreach vti = AllFloatVectors in {
                                   VLOpFrag),
               (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
         (vti.Vector (IMPLICIT_DEF)),
-                   vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
+                   vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, TA_MA)>;
 
     def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                   (SplatFPOp vti.ScalarRegClass:$rs2),
@@ -2537,8 +2564,7 @@ foreach vti = AllFloatVectors in {
                                   VLOpFrag),
               (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs1,
-                   vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
-                   TAIL_AGNOSTIC)>;
+                   vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
 
     // Rounding without exception to implement nearbyint.
     def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
@@ -2666,8 +2692,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
                              VLOpFrag)),
             (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
-                (fvti.Mask V0),
-                GPR:$vl, fvti.Log2SEW, TA_MA)>;
+                (fvti.Mask V0), GPR:$vl, TA_MA)>;
 }
 
 // 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
@@ -2700,8 +2725,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
                   (fwti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
-                  FRM_DYN,
-                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
+                  FRM_DYN, GPR:$vl, TA_MA)>;
 
   let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                GetVTypePredicates<fwti>.Predicates) in
@@ -2710,7 +2734,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
                                (fwti.Mask V0), VLOpFrag)),
               (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
-                  (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
+                  (fwti.Mask V0), GPR:$vl, TA_MA)>;
   }
 }
 
@@ -2854,7 +2878,7 @@ foreach vti = AllIntegerVectors in {
                                                 VLOpFrag)),
               (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
     def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                 vti.RegClass:$merge,
                                                 (vti.Mask V0),
@@ -2889,7 +2913,7 @@ foreach vti = AllIntegerVectors in {
                                          VLOpFrag)),
               (!cast<Instruction>(inst#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -2927,7 +2951,7 @@ foreach vti = AllFloatVectors in {
                                      VLOpFrag)),
               (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
     def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                 vti.RegClass:$merge,
                                                 (vti.Mask V0),
@@ -2963,7 +2987,7 @@ foreach vti = AllFloatVectors in {
                                          VLOpFrag)),
               (!cast<Instruction>(inst#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+                   (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>;
   }
 }
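
The VL patterns are the masked counterpart of the same change: the "_E"#sew suffix composes before "_MASK" (e.g. PseudoVFMUL_VV_M1_E64_MASK), and in the SEW-aware form GPR:$vl is followed directly by the policy immediate rather than by log2sew. A rough C++ sketch of the trailing-operand layout, assuming it is exactly what the patterns above pass (mask, optional FRM, VL, optional SEW, policy):

  // Trailing control operands of a masked binary pseudo as emitted by the
  // patterns above; the SEW slot disappears for SEW-aware opcodes.
  static unsigned numTrailingControlOps(bool HasRoundingMode, bool IsSEWAware) {
    unsigned N = 1;                  // mask (V0)
    N += HasRoundingMode ? 1 : 0;    // FRM_DYN
    N += 1;                          // VL (GPR:$vl)
    N += IsSEWAware ? 0 : 1;         // log2sew immediate
    N += 1;                          // policy (e.g. TAIL_AGNOSTIC)
    return N;
  }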
 
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index 788d8f9cfc853a..f5b07a0a7ff4be 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -100,10 +100,10 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp,
 
   const MCInstrDesc &MCID = MI.getDesc();
   const uint64_t TSFlags = MCID.TSFlags;
-  if (!RISCVII::hasSEWOp(TSFlags))
+  if (!RISCVII::hasSEW(TSFlags))
     return false;
   assert(RISCVII::hasVLOp(TSFlags));
-  const unsigned Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(MCID)).getImm();
+  const unsigned Log2SEW = RISCVII::getSEWOp(MI).getImm();
 
   if (UserOp.getOperandNo() == RISCVII::getVLOpNum(MCID))
     return false;
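
The C++ side switches from the operand-based query (hasSEWOp/getSEWOpNum, which presuppose an explicit SEW immediate) to the new hasSEW/getSEWOp helpers, which also cover SEW-aware pseudos that no longer carry that operand. A minimal usage sketch, assuming the helpers behave as used in the hunk above (how getSEWOp recovers SEW for _E<sew> opcodes is internal to this patch and not shown):

  #include "MCTargetDesc/RISCVBaseInfo.h"
  #include "llvm/CodeGen/MachineInstr.h"

  // Returns true and sets Log2SEW if MI has SEW semantics, whether the SEW
  // comes from an explicit operand or from an _E<sew> opcode.
  static bool getLog2SEW(const llvm::MachineInstr &MI, unsigned &Log2SEW) {
    if (!llvm::RISCVII::hasSEW(MI.getDesc().TSFlags))
      return false;
    Log2SEW = llvm::RISCVII::getSEWOp(MI).getImm();
    return true;
  }
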
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
index a54da97d2548a1..0a08358194b2dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -41,7 +41,7 @@ body: |
     ; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 1
     ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 undef renamable $v8, killed renamable $x10, $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+    ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 undef renamable $v8, killed renamable $x10, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
     ; CHECK-NEXT: $x10 = PseudoReadVLENB
     ; CHECK-NEXT: $x10 = SLLI killed $x10, 1
     ; CHECK-NEXT: $x10 = SUB $x8, killed $x10
@@ -58,7 +58,7 @@ body: |
     %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
-    %2:vr = PseudoVLE64_V_M1 %pt, %0, %1, 6, 0 :: (load unknown-size from %ir.pa, align 8)
+    %2:vr = PseudoVLE64_V_M1 %pt, %0, %1, 0 :: (load unknown-size from %ir.pa, align 8)
     %3:gpr = ADDI %stack.2, 0
     VS1R_V killed %2:vr, %3:gpr
     PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
index eb4c8bfdd67f9a..dd40005b074dcf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
@@ -69,7 +69,7 @@ body:             |
 
   bb.4.entry:
     %33:vr = PHI %31, %bb.2, %25, %bb.3
-    PseudoVSE64_V_M1 killed %33, %2, 1, 6 /* e64 */
+    PseudoVSE64_V_M1 killed %33, %2, 1
     PseudoRET
 
 ...
diff --git a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir
index 5221fa73525cc0..0570bdf8590fa5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir
@@ -128,9 +128,9 @@ body:             |
     SD killed renamable $x13, %stack.1, 0, debug-location !8
     DBG_VALUE %stack.1, $noreg, !11, !DIExpression(DW_OP_deref), debug-location !8
 
-    PseudoVSE32_V_M1 killed renamable $v8, %stack.2, 8, 5, debug-location !DILocation(line: 5, column: 1, scope: !5)
+    PseudoVSE32_V_M1 killed renamable $v8, %stack.2, 8, debug-location !DILocation(line: 5, column: 1, scope: !5)
     DBG_VALUE %stack.2, $noreg, !12, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 5, column: 1, scope: !5)
-    PseudoVSE32_V_M1 killed renamable $v9, %stack.3, 8, 5, debug-location !DILocation(line: 6, column: 1, scope: !5)
+    PseudoVSE32_V_M1 killed renamable $v9, %stack.3, 8, debug-location !DILocation(line: 6, column: 1, scope: !5)
     DBG_VALUE %stack.3, $noreg, !13, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 6, column: 1, scope: !5)
 
     PseudoVSM_V_B64 killed renamable $v0, %stack.4, 8, 0, debug-location !DILocation(line: 2, column: 1, scope: !5)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll
index a4851e9838fbfb..ab9ecc0507b244 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll
@@ -9,7 +9,7 @@ define <2 x double> @foo(<2 x double> %x, <2 x double> %y) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr = COPY $v9
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVFADD_VV_M1_E64_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1_E64 $noreg, [[COPY1]], [[COPY]], 7, 2, 6 /* e64 */, 1 /* ta, mu */, implicit $frm
+  ; CHECK-NEXT:   [[PseudoVFADD_VV_M1_E64_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1_E64 $noreg, [[COPY1]], [[COPY]], 7, 2, 1 /* ta, mu */, implicit $frm
   ; CHECK-NEXT:   $v8 = COPY [[PseudoVFADD_VV_M1_E64_]]
   ; CHECK-NEXT:   PseudoRET implicit $v8
   %1 = fadd fast <2 x double> %x, %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
index 5c592dd1a2d684..e5f45cb5f9cdb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
@@ -17,7 +17,7 @@ define i64 @test(<vscale x 1 x i64> %0) nounwind {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr = COPY $v8
   ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gpr = ADDI %stack.0.a, 0
-  ; CHECK-NEXT:   PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1, 6 /* e64 */
+  ; CHECK-NEXT:   PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1 :: (store unknown-size into %ir.b, align 8)
   ; CHECK-NEXT:   [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load (s64) from %ir.a)
   ; CHECK-NEXT:   $x10 = COPY [[LD]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
index 292f1deb2cce8d..f2004be89076a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
@@ -12,7 +12,7 @@ define <vscale x 8 x i64> @vpload_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 ze
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK $noreg, [[COPY2]], $v0, [[COPY]], 6 /* e64 */, 1 /* ta, mu */ :: (load unknown-size from %ir.ptr, align 64)
+  ; CHECK-NEXT:   [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK $noreg, [[COPY2]], $v0, [[COPY]], 1 /* ta, mu */ :: (load unknown-size from %ir.ptr, align 64)
   ; CHECK-NEXT:   $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
   ; CHECK-NEXT:   PseudoRET implicit $v8m8
   %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
index 8457f3d2c149c1..ac2a06dda7c7ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
@@ -15,7 +15,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %x, <vscale x 1 x double
   ; CHECK-NEXT:   [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 32
   ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gprnox0 = SRLI killed [[SLLI]], 32
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVFMUL_VV_M1_E64_MASK:%[0-9]+]]:vrnov0 = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFMUL_VV_M1_E64_MASK $noreg, [[COPY3]], [[COPY2]], $v0, 7, killed [[SRLI]], 6 /* e64 */, 1 /* ta, mu */, implicit $frm
+  ; CHECK-NEXT:   [[PseudoVFMUL_VV_M1_E64_MASK:%[0-9]+]]:vrnov0 = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFMUL_VV_M1_E64_MASK $noreg, [[COPY3]], [[COPY2]], $v0, 7, killed [[SRLI]], 1 /* ta, mu */, implicit $frm
   ; CHECK-NEXT:   $v8 = COPY [[PseudoVFMUL_VV_M1_E64_MASK]]
   ; CHECK-NEXT:   PseudoRET implicit $v8
   %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir
index feadfc627b5c0a..17cf5ac80777a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir
@@ -11,20 +11,20 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: undef %1.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: undef %1.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 0 /* tu, mu */
     ; CHECK-NEXT: %pt2:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: %1.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %1.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 0 /* tu, mu */
     ; CHECK-NEXT: %pt3:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 0 /* tu, mu */
     ; CHECK-NEXT: undef early-clobber %5.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 undef %5.sub_vrm2_0, %1.sub_vrm2_0, 0, 1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: %5.sub_vrm2_1:vrn2m2 = COPY %1.sub_vrm2_1
     ; CHECK-NEXT: PseudoVSUXSEG2EI32_V_M2_M2 %5, $x10, [[PseudoVLE32_V_M2_]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
     %pt:vrm2 = IMPLICIT_DEF
-    undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 5, 0
+    undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 0
     %pt2:vrm2 = IMPLICIT_DEF
-    %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 5, 0
+    %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 0
     %pt3:vrm2 = IMPLICIT_DEF
-    %1:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 5, 0
+    %1:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 0
     undef early-clobber %2.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 undef %2.sub_vrm2_0, %0.sub_vrm2_0:vrn2m2, 0, 1, 5, 0, implicit $vl, implicit $vtype
     %2.sub_vrm2_1:vrn2m2 = COPY %0.sub_vrm2_1:vrn2m2
     PseudoVSUXSEG2EI32_V_M2_M2 %2:vrn2m2, $x10, %1:vrm2, 1, 5, implicit $vl, implicit $vtype
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
index 31fd5bdbd31fd7..4c80d554893f9d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ -16,7 +16,7 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
+  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
   ; CHECK-NEXT:   VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
   ; CHECK-NEXT:   PseudoRET
   %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
@@ -35,7 +35,7 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
+  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
   ; CHECK-NEXT:   VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
   ; CHECK-NEXT:   PseudoRET
   %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll
index a8934bb25571c9..05fc39690d5323 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride,
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 1 /* ta, mu */ :: (load unknown-size, align 1)
+  ; CHECK-NEXT:   [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], $v0, [[COPY]], 1 /* ta, mu */ :: (load unknown-size, align 1)
   ; CHECK-NEXT:   $v8 = COPY [[PseudoVLSE8_V_MF8_MASK]]
   ; CHECK-NEXT:   PseudoRET implicit $v8
   %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -37,7 +37,7 @@ define void @strided_vpstore_nxv1i8_i8(<vscale x 1 x i8> %val, ptr %ptr, i8 sign
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   PseudoVSSE8_V_MF8_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */ :: (store unknown-size, align 1)
+  ; CHECK-NEXT:   PseudoVSSE8_V_MF8_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]] :: (store unknown-size, align 1)
   ; CHECK-NEXT:   PseudoRET
   call void @llvm.experimental.vp.strided.store.nxv1i8.p0.i8(<vscale x 1 x i8> %val, ptr %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir
index 9cafb323dc65c8..9dd3f02e9e5731 100644
--- a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir
@@ -10,7 +10,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF
@@ -20,20 +20,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1
     ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm4 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_0
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm4 = IMPLICIT_DEF
     early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5/* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -48,7 +48,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF
@@ -58,20 +58,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0
     ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm4 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm4 = IMPLICIT_DEF
     early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -86,7 +86,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF
@@ -96,20 +96,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm4 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_2
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm4 = IMPLICIT_DEF
     early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -124,7 +124,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF
@@ -134,20 +134,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm4 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_3
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm4 = IMPLICIT_DEF
     early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -162,7 +162,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF
@@ -170,20 +170,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm4 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm2_0
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm4 = IMPLICIT_DEF
     early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -198,7 +198,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF
@@ -206,20 +206,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm4 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm2_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm4 = IMPLICIT_DEF
     early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -235,7 +235,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -247,20 +247,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_0
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -275,7 +275,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -287,20 +287,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -315,7 +315,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -327,20 +327,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_2
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -355,7 +355,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -367,20 +367,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_3
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -395,7 +395,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_4
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -407,20 +407,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_5
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_4
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -435,7 +435,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_5
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -447,20 +447,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_4
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_5
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -475,7 +475,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_6
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -487,20 +487,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_7
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_6
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -515,7 +515,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_7
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -527,20 +527,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_6
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_7
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -555,7 +555,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -565,20 +565,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_0
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -593,7 +593,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -603,20 +603,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -631,7 +631,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -641,20 +641,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_3
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_2
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -669,7 +669,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -679,20 +679,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_2
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_3
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -707,7 +707,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm4 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -715,20 +715,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm4 = IMPLICIT_DEF
-    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm4_0
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -743,7 +743,7 @@ body:             |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm4 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -751,20 +751,20 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm4 = IMPLICIT_DEF
-    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm4_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
index 89b756818e7f56..83135734e09066 100644
--- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
@@ -7,7 +7,6 @@
 # set.
 
 --- |
-  ; ModuleID = 'test.ll'
   source_filename = "test.ll"
   target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
   target triple = "riscv64"
@@ -53,7 +52,7 @@ body:             |
     ; CHECK-NEXT: $v0 = COPY [[COPY]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
-    ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8)
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
     %1:vr = COPY $v0
@@ -61,7 +60,7 @@ body:             |
     $v0 = COPY %1
     %3:vrm8 = IMPLICIT_DEF
     %4:vrm8nov0 = COPY %3
-    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6, 1 :: (load (s512) from %ir.a, align 8)
+    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 1 :: (load (s512) from %ir.a, align 8)
     $v8m8 = COPY %2
     PseudoRET implicit $v8m8
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
index 15cb42bacf1735..59410e6a5251c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -14,7 +14,7 @@ define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -31,7 +31,7 @@ define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, ptr %p, i64 %vl) {
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -50,7 +50,7 @@ define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -71,7 +71,7 @@ define i64 @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
   ; CHECK-NEXT:   [[DEF2:%[0-9]+]]:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   [[DEF3:%[0-9]+]]:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[DEF]], %subreg.sub_vrm1_0, [[DEF2]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -89,7 +89,7 @@ define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, ptr %base, i64 %vl,
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
   ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -109,7 +109,7 @@ define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, ptr %base, <vscale
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
   ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index 5bb6ce250e8db7..7944022cfaa54d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -12,10 +12,10 @@ body:             |
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v12m2 = VMV2R_V $v28m2
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $v12m2 = COPY $v28m2
 ...
 ---
@@ -28,10 +28,10 @@ body:             |
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $v12m4 = COPY $v28m4
 ...
 ---
@@ -77,11 +77,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit-def $vl
+    ; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 0 /* tu, mu */, implicit-def $vl
     ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
     $v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5, 0, implicit $vl, implicit $vtype
-    $v4m4,$x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 5, 0, implicit-def $vl
+    $v4m4,$x0 = PseudoVLE32FF_V_M4 undef $v4m4, $x16, $noreg, 0, implicit-def $vl
     $v12m4 = COPY $v28m4
 ...
 ---
@@ -94,18 +94,18 @@ body:             |
     ; CHECK: liveins: $x14, $x16, $x17, $x18
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x17, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $x15 = PseudoVSETVLI $x17, 73, implicit-def $vl, implicit-def $vtype
-    $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4, 0, implicit $vl, implicit $vtype
+    $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0, implicit $vl, implicit $vtype
     $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype
-    $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 0, implicit $vl, implicit $vtype
     $v12m4 = COPY $v28m4
 ...
 ---
@@ -118,18 +118,18 @@ body:             |
     ; CHECK: liveins: $x14, $x16, $x17, $x18
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 undef $v4m4, killed $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype
-    $v0m2 = PseudoVLE32_V_M2  undef $v0m2, $x18, $noreg, 4, 0, implicit $vl, implicit $vtype
+    $v0m2 = PseudoVLE32_V_M2  undef $v0m2, $x18, $noreg, 0, implicit $vl, implicit $vtype
     $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype
-    $v4m4 = PseudoVLE32_V_M4  undef $v4m4, killed $x18, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v4m4 = PseudoVLE32_V_M4  undef $v4m4, killed $x18, $noreg, 0, implicit $vl, implicit $vtype
     $v12m4 = COPY $v28m4
 ...
 ---
@@ -142,14 +142,14 @@ body:             |
     ; CHECK: liveins: $x14, $x16, $x17, $x18
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype
-    $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 4, 0, implicit $vl, implicit $vtype
+    $v0m2 = PseudoVLE32_V_M2 undef $v0m2, $x18, $noreg, 0, implicit $vl, implicit $vtype
     $v12m4 = COPY $v28m4
 ...
 ---
@@ -162,13 +162,13 @@ body:             |
     ; CHECK: liveins: $x16, $x17
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETIVLI 4, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: early-clobber $v28m4 = PseudoVWADD_VV_M2 undef $v28m4, $v26m2, $v8m2, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v12m2 = VMV2R_V $v28m2
     $x15 = PseudoVSETIVLI 4, 73, implicit-def $vl, implicit-def $vtype
-    $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 4, 0, implicit $vl, implicit $vtype
-    $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 4, 0, implicit $vl, implicit $vtype
+    $v26m2 = PseudoVLE16_V_M2 undef $v26m2, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
+    $v8m2 = PseudoVLE16_V_M2 undef $v8m2, killed $x17, $noreg, 0, implicit $vl, implicit $vtype
 
     $v28m4 = PseudoVWADD_VV_M2 undef $v28m4, $v26m2, $v8m2, $noreg, 4, 0, implicit $vl, implicit $vtype
     $v12m2 = COPY $v28m2
@@ -183,11 +183,11 @@ body:             |
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 74 /* e16, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
-    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28m4 = PseudoVLE32_V_M4 undef $v28m4, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $x0 = PseudoVSETVLIX0 $x0, 74, implicit-def $vl, implicit-def $vtype
     $v12m4 = COPY $v28m4
 ...
@@ -201,12 +201,12 @@ body:             |
     ; CHECK: liveins: $x10, $v8, $v26, $v27
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 1 /* ta, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v26 = VMV1R_V killed $v8
     ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75 /* e16, m8, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v8m8 = VL8RE8_V killed $x10
     $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
-    $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, 1, implicit $vl, implicit $vtype
+    $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 1, implicit $vl, implicit $vtype
     $v26 = COPY killed renamable $v8
     $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype
     $v8m8 = VL8RE8_V killed $x10
@@ -221,10 +221,10 @@ body:             |
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v10 = VMV1R_V $v8
     $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
-    $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $v10 = COPY $v8
 ...
 ---
@@ -237,10 +237,10 @@ body:             |
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v10m2 = VMV2R_V $v8m2
     $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
-    $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $v10_v11 = COPY $v8_v9
 ...
 ---
@@ -253,10 +253,10 @@ body:             |
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v12 = VMV1R_V $v28
     $x15 = PseudoVSETVLI $x14, 87, implicit-def $vl, implicit-def $vtype
-    $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v28 = PseudoVLE32_V_MF2 undef $v28, killed $x16, $noreg, 0, implicit $vl, implicit $vtype
     $v12 = COPY $v28
 ...
 ---
@@ -269,14 +269,14 @@ body:             |
     ; CHECK: liveins: $x12, $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x0 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
+    ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
     ; CHECK-NEXT: $v24m8 = VMV8R_V killed $v8m8
     $x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
-    $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5, 0, implicit $vl, implicit $vtype
+    $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 0, implicit $vl, implicit $vtype
     $x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype
-    $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
+    $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 0, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
     $v24_v25_v26_v27_v28_v29_v30_v31 = COPY killed $v8_v9_v10_v11_v12_v13_v14_v15
 ...
 ---
@@ -289,12 +289,12 @@ body:             |
     ; CHECK: liveins: $x10, $x11, $v8, $v9
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x0 = PseudoVSETVLI $x10, 201 /* e16, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v10 = VMV1R_V $v8
     ; CHECK-NEXT: $v11 = VMV1R_V $v9
     ; CHECK-NEXT: $v12m2 = VMV2R_V $v10m2
     $x0 = PseudoVSETVLI $x10, 201, implicit-def $vl, implicit-def $vtype
-    $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 4, 0, implicit $vl, implicit $vtype
+    $v10m2 = PseudoVLE16_V_M2 undef $v10m2, killed $x11, $noreg, 0, implicit $vl, implicit $vtype
     $v10 = COPY $v8
     $v11 = COPY $v9
     $v12m2 = COPY $v10m2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 16c4a1a0a89ec2..7d0e9377b9bbe4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -191,7 +191,7 @@ body:             |
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
   ; CHECK-NEXT:   PseudoBR %bb.1
@@ -222,7 +222,7 @@ body:             |
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
-    %0:vr = PseudoVLE64_V_M1 %pt, %5, %7, 6, 0
+    %0:vr = PseudoVLE64_V_M1 %pt, %5, %7, 0
     %8:gpr = COPY $x0
     BEQ %4, %8, %bb.2
     PseudoBR %bb.1
@@ -276,7 +276,7 @@ body:             |
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY2]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT:   BEQ [[COPY3]], [[COPY4]], %bb.2
   ; CHECK-NEXT:   PseudoBR %bb.1
@@ -298,7 +298,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.if.end:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:vr = PHI %1, %bb.1, %2, %bb.2
-  ; CHECK-NEXT:   PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   PseudoRET
   bb.0.entry:
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
@@ -309,7 +309,7 @@ body:             |
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
-    %0:vr = PseudoVLE32_V_MF2 %pt, %5, %7, 5, 0
+    %0:vr = PseudoVLE32_V_MF2 %pt, %5, %7, 0
     %8:gpr = COPY $x0
     BEQ %4, %8, %bb.2
     PseudoBR %bb.1
@@ -325,7 +325,7 @@ body:             |
 
   bb.3.if.end:
     %3:vr = PHI %1, %bb.1, %2, %bb.2
-    PseudoVSE64_V_M1 %3, %6, %7, 6
+    PseudoVSE64_V_M1 %3, %6, %7
     PseudoRET
 
 ...
@@ -533,7 +533,7 @@ body:             |
   ; CHECK-NEXT:   [[PseudoVMSEQ_VI_MF2_:%[0-9]+]]:vmv0 = PseudoVMSEQ_VI_MF2 killed [[PseudoVID_V_MF2_]], 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   $v0 = COPY [[PseudoVMSEQ_VI_MF2_]]
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 23 /* e32, mf2, tu, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
   ; CHECK-NEXT:   [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
@@ -569,7 +569,7 @@ body:             |
 
     %5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5
     $v0 = COPY %5
-    %6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0
+    %6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 0
     %7:gpr = PseudoVCPOP_M_B1 %5, -1, 0
     %8:gpr = COPY $x0
     BEQ killed %7, %8, %bb.3
@@ -625,7 +625,7 @@ body:             |
   ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 %pt2, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]]
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
-  ; CHECK-NEXT:   PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
   ; CHECK-NEXT:   BLTU [[ADDI]], [[COPY1]], %bb.1
   ; CHECK-NEXT:   PseudoBR %bb.2
@@ -650,7 +650,7 @@ body:             |
     %7:vr = PseudoVADD_VX_M1 %pt2, %4:vr, %6:gpr, -1, 6, 0
     %8:gpr = MUL %6:gpr, %2:gpr
     %9:gpr = ADD %0:gpr, %8:gpr
-    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
+    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1
     %10:gpr = ADDI %6:gpr, 1
     BLTU %10:gpr, %3:gpr, %bb.1
     PseudoBR %bb.2
@@ -697,7 +697,7 @@ body:             |
   ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 %pt2, [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]]
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
-  ; CHECK-NEXT:   PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
@@ -726,7 +726,7 @@ body:             |
     %7:vr = PseudoVADD_VX_M1 %pt2, %4:vr, %6:gpr, -1, 6, 0
     %8:gpr = MUL %6:gpr, %2:gpr
     %9:gpr = ADD %0:gpr, %8:gpr
-    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
+    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1
     %10:gpr = ADDI %6:gpr, 1
 
   bb.3:
@@ -797,7 +797,7 @@ body:             |
   ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:gpr = PHI [[ADDIW]], %bb.0, %4, %bb.1
   ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:vr = PHI [[COPY3]], %bb.0, %16, %bb.1
   ; CHECK-NEXT:   %pt:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, [[PHI]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
+  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, [[PHI]], 4, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
   ; CHECK-NEXT:   %pt2:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, killed [[PseudoVLE32_V_M1_]], [[PHI2]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gpr = nsw ADDI [[PHI1]], -4
@@ -811,9 +811,9 @@ body:             |
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   [[PseudoVMV_S_X:%[0-9]+]]:vr = PseudoVMV_S_X [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E32_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E32 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X]], 4, 1 /* ta, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
+  ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_E32_]], [[COPY]], 1, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
   ; CHECK-NEXT:   PseudoRET
   bb.0.entry:
     liveins: $x10, $x12
@@ -834,7 +834,7 @@ body:             |
     %1:gpr = PHI %9, %bb.0, %4, %bb.1
     %2:vr = PHI %10, %bb.0, %16, %bb.1
     %pt:vr = IMPLICIT_DEF
-    %14:vr = PseudoVLE32_V_M1 %pt, %0, 4, 5, 0 :: (load (s128) from %ir.lsr.iv12, align 4)
+    %14:vr = PseudoVLE32_V_M1 %pt, %0, 4, 0 :: (load (s128) from %ir.lsr.iv12, align 4)
     %pt2:vr = IMPLICIT_DEF
     %16:vr = PseudoVADD_VV_M1 %pt2, killed %14, %2, 4, 5, 0
     %4:gpr = nsw ADDI %1, -4
@@ -848,8 +848,8 @@ body:             |
     %21:vr = IMPLICIT_DEF
     %20:vr = PseudoVMV_S_X %21, %19, 1, 5
     %24:vr = IMPLICIT_DEF
-    %23:vr = PseudoVREDSUM_VS_M1_E8 %24, %16, killed %20, 4, 5, 1
-    PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
+    %23:vr = PseudoVREDSUM_VS_M1_E32 %24, %16, killed %20, 4, 1
+    PseudoVSE32_V_M1 killed %23, %8, 1 :: (store (s32) into %ir.res)
     PseudoRET
 
 ...
@@ -977,12 +977,12 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr = ADD %src, [[PHI]]
   ; CHECK-NEXT:   %pt2:vrnov0 = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed [[ADD1]], -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed [[ADD1]], -1, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   %ptb:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
   ; CHECK-NEXT:   [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 %ptb, [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:gpr = ADD %dst, [[PHI]]
-  ; CHECK-NEXT:   PseudoVSE8_V_MF8 killed [[PseudoVADD_VI_MF8_]], killed [[ADD2]], -1, 3 /* e8 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   PseudoVSE8_V_MF8 killed [[PseudoVADD_VI_MF8_]], killed [[ADD2]], -1, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3:
   ; CHECK-NEXT:   successors: %bb.1(0x7c000000), %bb.4(0x04000000)
@@ -1028,11 +1028,11 @@ body:             |
 
     %66:gpr = ADD %src, %26
     %pt2:vrnov0 = IMPLICIT_DEF
-    %67:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed %66, -1, 3, 0
+    %67:vrnov0 = PseudoVLE8_V_MF8 %pt2, killed %66, -1, 0
     %ptb:vr = IMPLICIT_DEF
     %76:vrnov0 = PseudoVADD_VI_MF8 %ptb, %67, 4, -1, 3, 0
     %77:gpr = ADD %dst, %26
-    PseudoVSE8_V_MF8 killed %76, killed %77, -1, 3
+    PseudoVSE8_V_MF8 killed %76, killed %77, -1
 
   bb.3:
     successors: %bb.1(0x7c000000), %bb.4(0x04000000)
@@ -1048,6 +1048,18 @@ body:             |
 ---
 name: pre_undemanded_vl
 body: |
+  ; CHECK-LABEL: name: pre_undemanded_vl
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   PseudoBR %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   %x:gpr = PseudoVMV_X_S undef $noreg, 6 /* e64 */, implicit $vtype
+  ; CHECK-NEXT:   PseudoBR %bb.1
   bb.0:
     PseudoBR %bb.1
   bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index e567897aa86897..bbe7f8a1ec5d0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -164,7 +164,7 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
     ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
@@ -173,7 +173,7 @@ body:             |
     %1:vr = COPY $v8
     %0:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
-    %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0
+    %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 0
     %pt2:vr = IMPLICIT_DEF
     %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0
     $v8 = COPY %4
@@ -206,7 +206,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY1]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY1]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: %dead:vr = IMPLICIT_DEF
     ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY %3
@@ -214,7 +214,7 @@ body:             |
     %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
-    %2:vr = PseudoVLE32_V_MF2 %pt, %0, %1, 5, 0
+    %2:vr = PseudoVLE32_V_MF2 %pt, %0, %1, 0
     %dead:vr = IMPLICIT_DEF
     early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed %2, %1, 6, 0
     $v8 = COPY %3
@@ -279,21 +279,21 @@ body:             |
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt2, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY1]], 2, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt2, [[COPY]], 2, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
     ; CHECK-NEXT: %pt3:vr = IMPLICIT_DEF
     ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt3, [[PseudoVLE64_V_M1_]], [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: PseudoVSE64_V_M1 [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
+    ; CHECK-NEXT: PseudoVSE64_V_M1 [[PseudoVADD_VV_M1_]], [[COPY1]], 2, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
     ; CHECK-NEXT: PseudoRET
     %1:gpr = COPY $x11
     %0:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
     %pt2:vr = IMPLICIT_DEF
-    %2:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x)
-    %3:vr = PseudoVLE64_V_M1 %pt2, %1, 2, 6, 0 :: (load (s128) from %ir.y)
+    %2:vr = PseudoVLE64_V_M1 %pt, %0, 2, 0 :: (load (s128) from %ir.x)
+    %3:vr = PseudoVLE64_V_M1 %pt2, %1, 2, 0 :: (load (s128) from %ir.y)
     %pt3:vr = IMPLICIT_DEF
     %4:vr = PseudoVADD_VV_M1 %pt3, killed %2, killed %3, 2, 6, 0
-    PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x)
+    PseudoVSE64_V_M1 killed %4, %0, 2 :: (store (s128) into %ir.x)
     PseudoRET
 
 ...
@@ -323,21 +323,22 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY]], 2, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
     ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 $noreg, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], [[PseudoVLE64_V_M1_]], [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 192 /* e8, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], [[PseudoVLE64_V_M1_]], [[PseudoVMV_V_I_M1_]], 2, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 219 /* e64, m8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
     ; CHECK-NEXT: [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype
     ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
-    %1:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x)
+    %1:vr = PseudoVLE64_V_M1 %pt, %0, 2, 0 :: (load (s128) from %ir.x)
     %2:vr = PseudoVMV_V_I_M1 $noreg, 0, -1, 6, 0
     %4:vr = IMPLICIT_DEF
-    %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 6, 1
+    %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 1
     %5:gpr = PseudoVMV_X_S killed %3, 6
     $x10 = COPY %5
     PseudoRET implicit $x10
@@ -414,7 +415,7 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
     ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
@@ -425,7 +426,7 @@ body:             |
     %1:vr = COPY $v8
     %0:gpr = COPY $x10
     %pt:vr = IMPLICIT_DEF
-    %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0
+    %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 0
     INLINEASM &"", 1 /* sideeffect attdialect */
     %pt2:vr = IMPLICIT_DEF
     %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
index 8248c26636793e..3c72d3351d4c62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
@@ -187,7 +187,7 @@ body:             |
   ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 2, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT:   $x10 = ADDI $x2, 32
   ; CHECK-NEXT:   renamable $v8 = VL1RE8_V killed $x10 :: (load unknown-size from %stack.1, align 8)
-  ; CHECK-NEXT:   PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3 /* e8 */, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1)
+  ; CHECK-NEXT:   PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1)
   ; CHECK-NEXT:   $x10 = COPY renamable $x9
   ; CHECK-NEXT:   PseudoCALL target-flags(riscv-call) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10
   ; CHECK-NEXT:   PseudoBR %bb.1
@@ -216,7 +216,7 @@ body:             |
 
     dead $x0 = PseudoVSETIVLI 2, 69, implicit-def $vl, implicit-def $vtype
     renamable $v8 = VL1RE8_V %stack.1 :: (load unknown-size from %stack.1, align 8)
-    PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1)
+    PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1)
     ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
     $x10 = COPY renamable $x9
     PseudoCALL target-flags(riscv-call) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index fcd852f1210df5..cbf30a466e34f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -28,7 +28,7 @@ body: |
     ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22
     ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, renamable $x10, $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, renamable $x10, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x11 = ADDI $x2, 16
     ; CHECK-NEXT: $x12 = PseudoReadVLENB
     ; CHECK-NEXT: VS1R_V $v0, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
@@ -67,7 +67,7 @@ body: |
     ; CHECK-NEXT: PseudoRET
     %0:gpr = COPY $x10
     %1:gprnox0 = COPY $x11
-    $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, %0, %1, 6, 0
+    $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, %0, %1, 0
     PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0 :: (store unknown-size into %stack.0, align 8)
     renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0 :: (load unknown-size from %stack.0, align 8)
     VS1R_V killed $v8, %0:gpr

From caa794b893e99dc30084e0b2b6383b9d6a394594 Mon Sep 17 00:00:00 2001
From: wangpc <wangpengcheng.pp at bytedance.com>
Date: Mon, 29 Apr 2024 12:02:13 +0800
Subject: [PATCH 2/2] Fix typo

Created using spr 1.3.6-beta.1
---
 llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index cb5ab1e3a42911..0f2075cb29a049 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -180,8 +180,8 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) {
 /// \returns true if this instruction uses vxrm
 static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; }
 
-/// \returns true if this instruction has implict SEW value.
-static inline bool hasImplictSEW(uint64_t TSFlags) {
+/// \returns true if this instruction has an implicit SEW value.
+static inline bool hasImplicitSEW(uint64_t TSFlags) {
   return TSFlags & HasImplictSEWMask;
 }
 
@@ -192,7 +192,7 @@ static inline VSEW getVSEW(uint64_t TSFlags) {
 
 /// \returns true if there is a SEW value for the instruction.
 static inline bool hasSEW(uint64_t TSFlags) {
-  return hasSEWOp(TSFlags) || hasImplictSEW(TSFlags);
+  return hasSEWOp(TSFlags) || hasImplicitSEW(TSFlags);
 }
 
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {

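One more aid for reading the MIR above, unrelated to the rename: the immediates on PseudoVSETVLI/PseudoVSETIVLI are raw vtype encodings, and the /* e32, m4, ta, mu */ comments are decoded from them. Below is a small standalone sketch of mine, not code from the patch, that decodes a few of the values appearing in these tests using the standard RVV vtype layout (vlmul in bits 2:0, vsew in bits 5:3, vta in bit 6, vma in bit 7):

#include <cstdio>

int main() {
  // Immediates taken from the tests above; the printed lines should match
  // the inline comments, e.g. 216 -> e64, m1, ta, ma and 82 -> e32, m4,
  // ta, mu. Index 4 of the LMUL table is a reserved encoding.
  static const char *LMulNames[8] = {"m1", "m2",  "m4",  "m8",
                                     "?",  "mf8", "mf4", "mf2"};
  for (unsigned VType : {216u, 82u, 73u, 208u}) {
    unsigned VSew = (VType >> 3) & 0x7; // SEW = 8 << vsew
    std::printf("%3u: e%u, %s, %s, %s\n", VType, 8u << VSew,
                LMulNames[VType & 0x7], (VType >> 6) & 1 ? "ta" : "tu",
                (VType >> 7) & 1 ? "ma" : "mu");
  }
  return 0;
}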

